VirtualBox

Timestamp: Feb 16, 2025 10:45:02 PM (5 weeks ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 167565
Message: VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531
File: 1 copied

  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp

    r108243 r108244  
    25 25 * SPDX-License-Identifier: GPL-3.0-only
    26 26 */
    27 
    28 
    29 /** @page pg_iem    IEM - Interpreted Execution Manager
    30  *
    31  * The interpreted execution manager (IEM) is for executing short guest code
    32  * sequences that are causing too many exits / virtualization traps.  It will
    33  * also be used to interpret single instructions, thus replacing the selective
    34  * interpreters in EM and IOM.
    35  *
    36  * Design goals:
    37  *      - Relatively small footprint, although we favour speed and correctness
    38  *        over size.
    39  *      - Reasonably fast.
    40  *      - Correctly handle lock prefixed instructions.
    41  *      - Complete instruction set - eventually.
    42  *      - Refactorable into a recompiler, maybe.
    43  *      - Replace EMInterpret*.
    44  *
    45  * Using the existing disassembler has been considered, however this is thought
    46  * to conflict with speed as the disassembler chews things a bit too much while
    47  * leaving us with a somewhat complicated state to interpret afterwards.
    48  *
    49  *
    50  * The current code is very much work in progress. You've been warned!
    51  *
    52  *
    53  * @section sec_iem_fpu_instr   FPU Instructions
    54  *
    55  * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
    56  * same or equivalent instructions on the host FPU.  To make life easy, we also
    57  * let the FPU prioritize the unmasked exceptions for us.  This, however, only
    58  * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
    59  * for FPU exception delivery, because with CR0.NE=0 there is a window where we
    60  * can trigger spurious FPU exceptions.
    61  *
    62  * The guest FPU state is not loaded into the host CPU and kept there till we
    63  * leave IEM because the calling conventions have declared an all year open
    64  * season on much of the FPU state.  For instance an innocent looking call to
    65  * memcpy might end up using a whole bunch of XMM or MM registers if the
    66  * particular implementation finds it worthwhile.
    67  *
    68  *
    69  * @section sec_iem_logging     Logging
    70  *
    71  * The IEM code uses the "IEM" log group for the main logging. The different
    72  * logging levels/flags are generally used for the following purposes:
    73  *      - Level 1  (Log)  : Errors, exceptions, interrupts and such major events.
    74  *      - Flow  (LogFlow) : Basic enter/exit IEM state info.
    75  *      - Level 2  (Log2) : ?
    76  *      - Level 3  (Log3) : More detailed enter/exit IEM state info.
    77  *      - Level 4  (Log4) : Decoding mnemonics w/ EIP.
    78  *      - Level 5  (Log5) : Decoding details.
    79  *      - Level 6  (Log6) : Enables/disables the lockstep comparison with REM.
    80  *      - Level 7  (Log7) : iret++ execution logging.
    81  *      - Level 8  (Log8) :
    82  *      - Level 9  (Log9) :
    83  *      - Level 10 (Log10): TLBs.
    84  *      - Level 11 (Log11): Unmasked FPU exceptions.
    85  *
    86  * The "IEM_MEM" log group covers most of the memory-related detail logging,
    87  * except for errors and exceptions:
    88  *      - Level 1  (Log)  : Reads.
    89  *      - Level 2  (Log2) : Read fallbacks.
    90  *      - Level 3  (Log3) : MemMap read.
    91  *      - Level 4  (Log4) : MemMap read fallbacks.
    92  *      - Level 5  (Log5) : Writes.
    93  *      - Level 6  (Log6) : Write fallbacks.
    94  *      - Level 7  (Log7) : MemMap writes and read-writes.
    95  *      - Level 8  (Log8) : MemMap write and read-write fallbacks.
    96  *      - Level 9  (Log9) : Stack reads.
    97  *      - Level 10 (Log10): Stack read fallbacks.
    98  *      - Level 11 (Log11): Stack writes.
    99  *      - Level 12 (Log12): Stack write fallbacks.
    100  *      - Flow  (LogFlow) :
    101  *
    102  * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
    103  *      - Level 1  (Log)  : Errors and other major events.
    104  *      - Flow (LogFlow)  : Misc flow stuff (cleanup?)
    105  *      - Level 2  (Log2) : VM exits.
    106  *
    107  * The syscall logging level assignments:
    108  *      - Level 1: DOS and BIOS.
    109  *      - Level 2: Windows 3.x
    110  *      - Level 3: Linux.
    111  */
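/*
 * Illustration (a minimal sketch, not part of the original file): with this code
 * built under LOG_GROUP_IEM, the level assignments above map onto the usual
 * VBox/log.h macros roughly like this:
 *
 *     Log(("IEM: raising #GP(0)\n"));                // level 1: errors, exceptions
 *     LogFlow(("IEMExecOne: enter\n"));              // flow: enter/exit state info
 *     Log4(("decode: %04x:%08RX64 mov eax, ebx\n",
 *           pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); // level 4: mnemonics w/ EIP
 *     Log10(("code TLB: miss\n"));                   // level 10: TLB activity
 */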
    112 
    113 /* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
    114 #ifdef _MSC_VER
    115 # pragma warning(disable:4505)
    116 #endif
    117 27
    118 28
     
    127 37 #include <VBox/vmm/iem.h>
    128 38 #include <VBox/vmm/cpum.h>
    129 #include <VBox/vmm/pdmapic.h>
    130 #include <VBox/vmm/pdm.h>
    131 39 #include <VBox/vmm/pgm.h>
    132 #include <VBox/vmm/iom.h>
    133 #include <VBox/vmm/em.h>
    134 #include <VBox/vmm/hm.h>
    135 #include <VBox/vmm/nem.h>
    136 #include <VBox/vmm/gcm.h>
    137 #include <VBox/vmm/gim.h>
    138 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    139 # include <VBox/vmm/em.h>
    140 # include <VBox/vmm/hm_svm.h>
    141 #endif
    142 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    143 # include <VBox/vmm/hmvmxinline.h>
    144 #endif
    145 #include <VBox/vmm/tm.h>
    146 40 #include <VBox/vmm/dbgf.h>
    147 #include <VBox/vmm/dbgftrace.h>
    148 41 #include "IEMInternal.h"
    149 42 #include <VBox/vmm/vmcc.h>
     
    151 44 #include <VBox/err.h>
    152 45 #include <VBox/param.h>
    153 #include <VBox/dis.h>
    154 #include <iprt/asm-math.h>
    155 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    156 # include <iprt/asm-amd64-x86.h>
    157 #elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
    158 # include <iprt/asm-arm.h>
    159 #endif
    160 46 #include <iprt/assert.h>
    161 47 #include <iprt/string.h>
     
    164 50 #include "IEMInline.h"
    165 51 #ifdef VBOX_VMM_TARGET_X86
    166 # include "target-x86/IEMAllTlbInline-x86.h"
     52 # include "IEMAllTlbInline-x86.h"
    167 53 #endif
    168 54
    169 55
    170 /*********************************************************************************************************************************
    171 *   Global Variables                                                                                                             *
    172 *********************************************************************************************************************************/
    173 #if defined(IEM_LOG_MEMORY_WRITES)
    174 /** What IEM just wrote. */
    175 uint8_t g_abIemWrote[256];
    176 /** How much IEM just wrote. */
    177 size_t g_cbIemWrote;
    178 #endif
    179 
    180 
    181 /**
    182  * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
    183  * path.
    184  *
    185  * This will also invalidate TLB entries for any pages with active data
    186  * breakpoints on them.
    187  *
    188  * @returns IEM_F_BRK_PENDING_XXX or zero.
    189  * @param   pVCpu               The cross context virtual CPU structure of the
    190  *                              calling thread.
    191  *
    192  * @note    Don't call directly, use iemCalcExecDbgFlags instead.
    193  */
    194 uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
    195 {
    196     uint32_t fExec = 0;
    197 
    198     /*
    199      * Helper for invalidating the data TLB for breakpoint addresses.
    200      *
    201      * This is to make sure any access to the page will always trigger a TLB
    202      * load for as long as the breakpoint is enabled.
    203      */
    204 #ifdef IEM_WITH_DATA_TLB
    205 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
    206         RTGCPTR uTagNoRev = (a_uValue); \
    207         uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
    208         /** @todo do large page accounting */ \
    209         uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
    210         if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
    211             pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
    212         if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
    213             pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
    214     } while (0)
    215 #else
    216 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
    217 #endif
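    /* Note: the macro above zaps both slots of the even/odd pair for the tag;
       the even slot holds the entry tagged with the non-global TLB revision,
       the odd slot the one tagged with the global revision. */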
    218 
    219     /*
    220      * Process guest breakpoints.
    221      */
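    /* A breakpoint participates only if its local or global enable bit is set in
       DR7; the R/W field then selects instruction (execute-only), data (write or
       read/write) or I/O breakpoints, each mapped to its IEM_F_PENDING_BRK_XXX flag. */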
    222 #define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
    223         if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
    224         { \
    225             switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
    226             { \
    227                 case X86_DR7_RW_EO: \
    228                     fExec |= IEM_F_PENDING_BRK_INSTR; \
    229                     break; \
    230                 case X86_DR7_RW_WO: \
    231                 case X86_DR7_RW_RW: \
    232                     fExec |= IEM_F_PENDING_BRK_DATA; \
    233                     INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
    234                     break; \
    235                 case X86_DR7_RW_IO: \
    236                     fExec |= IEM_F_PENDING_BRK_X86_IO; \
    237                     break; \
    238             } \
    239         } \
    240     } while (0)
    241 
    242     uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
    243     if (fGstDr7 & X86_DR7_ENABLED_MASK)
    244     {
    245 /** @todo extract more details here to simplify matching later. */
    246 #ifdef IEM_WITH_DATA_TLB
    247         IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
    248 #endif
    249         PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
    250         PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
    251         PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
    252         PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
    253     }
    254 
    255     /*
    256      * Process hypervisor breakpoints.
    257      */
    258     PVMCC const    pVM       = pVCpu->CTX_SUFF(pVM);
    259     uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
    260     if (fHyperDr7 & X86_DR7_ENABLED_MASK)
    261     {
    262 /** @todo extract more details here to simplify matching later. */
    263         PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
    264         PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
    265         PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
    266         PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
    267     }
    268 
    269     return fExec;
    270 }
    271 
    272 
    273 /**
    274  * Initializes the decoder state.
    275  *
    276  * iemReInitDecoder is mostly a copy of this function.
    277  *
    278  * @param   pVCpu               The cross context virtual CPU structure of the
    279  *                              calling thread.
    280  * @param   fExecOpts           Optional execution flags:
    281  *                                  - IEM_F_BYPASS_HANDLERS
    282  *                                  - IEM_F_X86_DISREGARD_LOCK
    283  */
    284 DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
    285 {
    286     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    287     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    288     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    289     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    290     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    291     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    292     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    293     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    294     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    295     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    296 
    297     /* Execution state: */
    298     uint32_t fExec;
    299     pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
    300 
    301     /* Decoder state: */
    302     pVCpu->iem.s.enmDefAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    303     pVCpu->iem.s.enmEffAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;
    304     if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
    305     {
    306         pVCpu->iem.s.enmDefOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    307         pVCpu->iem.s.enmEffOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;
    308     }
    309     else
    310     {
    311         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    312         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    313     }
    314     pVCpu->iem.s.fPrefixes          = 0;
    315     pVCpu->iem.s.uRexReg            = 0;
    316     pVCpu->iem.s.uRexB              = 0;
    317     pVCpu->iem.s.uRexIndex          = 0;
    318     pVCpu->iem.s.idxPrefix          = 0;
    319     pVCpu->iem.s.uVex3rdReg         = 0;
    320     pVCpu->iem.s.uVexLength         = 0;
    321     pVCpu->iem.s.fEvexStuff         = 0;
    322     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
    323 #ifdef IEM_WITH_CODE_TLB
    324     pVCpu->iem.s.pbInstrBuf         = NULL;
    325     pVCpu->iem.s.offInstrNextByte   = 0;
    326     pVCpu->iem.s.offCurInstrStart   = 0;
    327 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    328     pVCpu->iem.s.offOpcode          = 0;
    329 # endif
    330 # ifdef VBOX_STRICT
    331     pVCpu->iem.s.GCPhysInstrBuf     = NIL_RTGCPHYS;
    332     pVCpu->iem.s.cbInstrBuf         = UINT16_MAX;
    333     pVCpu->iem.s.cbInstrBufTotal    = UINT16_MAX;
    334     pVCpu->iem.s.uInstrBufPc        = UINT64_C(0xc0ffc0ffcff0c0ff);
    335 # endif
    336 #else
    337     pVCpu->iem.s.offOpcode          = 0;
    338     pVCpu->iem.s.cbOpcode           = 0;
    339 #endif
    340     pVCpu->iem.s.offModRm           = 0;
    341     pVCpu->iem.s.cActiveMappings    = 0;
    342     pVCpu->iem.s.iNextMapping       = 0;
    343     pVCpu->iem.s.rcPassUp           = VINF_SUCCESS;
    344 
    345 #ifdef DBGFTRACE_ENABLED
    346     switch (IEM_GET_CPU_MODE(pVCpu))
    347     {
    348         case IEMMODE_64BIT:
    349             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
    350             break;
    351         case IEMMODE_32BIT:
    352             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    353             break;
    354         case IEMMODE_16BIT:
    355             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    356             break;
    357     }
    358 #endif
    359 }
    360 
    361 
    362 /**
    363  * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
    364  *
    365  * This is mostly a copy of iemInitDecoder.
    366  *
    367  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    368  */
    369 DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
    370 {
    371     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    372     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    373     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    374     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    375     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    376     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    377     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    378     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    379     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    380 
    381     /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
    382     AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
    383               ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
    384 
    385     IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
    386     pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
    387     pVCpu->iem.s.enmEffAddrMode     = enmMode;
    388     if (enmMode != IEMMODE_64BIT)
    389     {
    390         pVCpu->iem.s.enmDefOpSize   = enmMode;  /** @todo check if this is correct... */
    391         pVCpu->iem.s.enmEffOpSize   = enmMode;
    392     }
    393     else
    394     {
    395         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    396         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    397     }
    398     pVCpu->iem.s.fPrefixes          = 0;
    399     pVCpu->iem.s.uRexReg            = 0;
    400     pVCpu->iem.s.uRexB              = 0;
    401     pVCpu->iem.s.uRexIndex          = 0;
    402     pVCpu->iem.s.idxPrefix          = 0;
    403     pVCpu->iem.s.uVex3rdReg         = 0;
    404     pVCpu->iem.s.uVexLength         = 0;
    405     pVCpu->iem.s.fEvexStuff         = 0;
    406     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
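    /* If the instruction buffer from the previous run is still around, check whether
       the new RIP still falls within it: if so, keep the buffer and limit the readable
       window to 15 bytes past the instruction start (the maximum x86 instruction
       length) or to the end of the buffer; otherwise drop it so the next opcode fetch
       goes through the code TLB again. */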
    407 #ifdef IEM_WITH_CODE_TLB
    408     if (pVCpu->iem.s.pbInstrBuf)
    409     {
    410         uint64_t off = (enmMode == IEMMODE_64BIT
    411                         ? pVCpu->cpum.GstCtx.rip
    412                         : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
    413                      - pVCpu->iem.s.uInstrBufPc;
    414         if (off < pVCpu->iem.s.cbInstrBufTotal)
    415         {
    416             pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
    417             pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
    418             if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
    419                 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
    420             else
    421                 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
    422         }
    423         else
    424         {
    425             pVCpu->iem.s.pbInstrBuf       = NULL;
    426             pVCpu->iem.s.offInstrNextByte = 0;
    427             pVCpu->iem.s.offCurInstrStart = 0;
    428             pVCpu->iem.s.cbInstrBuf       = 0;
    429             pVCpu->iem.s.cbInstrBufTotal  = 0;
    430             pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    431         }
    432     }
    433     else
    434     {
    435         pVCpu->iem.s.offInstrNextByte = 0;
    436         pVCpu->iem.s.offCurInstrStart = 0;
    437         pVCpu->iem.s.cbInstrBuf       = 0;
    438         pVCpu->iem.s.cbInstrBufTotal  = 0;
    439 # ifdef VBOX_STRICT
    440         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    441 # endif
    442     }
    443 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    444     pVCpu->iem.s.offOpcode          = 0;
    445 # endif
    446 #else  /* !IEM_WITH_CODE_TLB */
    447     pVCpu->iem.s.cbOpcode           = 0;
    448     pVCpu->iem.s.offOpcode          = 0;
    449 #endif /* !IEM_WITH_CODE_TLB */
    450     pVCpu->iem.s.offModRm           = 0;
    451     Assert(pVCpu->iem.s.cActiveMappings == 0);
    452     pVCpu->iem.s.iNextMapping       = 0;
    453     Assert(pVCpu->iem.s.rcPassUp   == VINF_SUCCESS);
    454     Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
    455 
    456 #ifdef DBGFTRACE_ENABLED
    457     switch (enmMode)
    458     {
    459         case IEMMODE_64BIT:
    460             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
    461             break;
    462         case IEMMODE_32BIT:
    463             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    464             break;
    465         case IEMMODE_16BIT:
    466             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    467             break;
    468     }
    469 #endif
    470 }
    471 
    472 
    473 
     56 #ifndef IEM_WITH_CODE_TLB
    474 57 /**
    475 58  * Prefetch opcodes the first time when starting execution.
    476 59  *
    477 60  * @returns Strict VBox status code.
    478  * @param   pVCpu               The cross context virtual CPU structure of the
    479  *                              calling thread.
    480  * @param   fExecOpts           Optional execution flags:
    481  *                                  - IEM_F_BYPASS_HANDLERS
    482  *                                  - IEM_F_X86_DISREGARD_LOCK
    483  */
    484 static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
    485 {
    486     iemInitDecoder(pVCpu, fExecOpts);
    487 
    488 #ifndef IEM_WITH_CODE_TLB
     61 * @param   pVCpu   The cross context virtual CPU structure of the calling
     62 *                  thread.
     63 */
     64 VBOXSTRICTRC iemOpcodeFetchPrefetch(PVMCPUCC pVCpu) RT_NOEXCEPT
     65 {
    489 66     /*
    490 67      * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
     
    616 193     }
    617 194     pVCpu->iem.s.cbOpcode = cbToTryRead;
        195     return VINF_SUCCESS;
        196 }
    618 197 #endif /* !IEM_WITH_CODE_TLB */
    619     return VINF_SUCCESS;
    620 }
    621 
    622 
    623 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    624 /**
    625  * Worker for iemTlbInvalidateAll.
    626  */
    627 template<bool a_fGlobal>
    628 DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
    629 {
    630     if (!a_fGlobal)
    631         pTlb->cTlsFlushes++;
    632     else
    633         pTlb->cTlsGlobalFlushes++;
    634 
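    /* Bumping the revision lazily invalidates all non-global entries, as valid tags
       embed the current revision value; only on the rare rollover are the even
       (non-global) slots cleared explicitly. */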
    635     pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
    636     if (RT_LIKELY(pTlb->uTlbRevision != 0))
    637     { /* very likely */ }
    638     else
    639     {
    640         pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
    641         pTlb->cTlbRevisionRollovers++;
    642         unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
    643         while (i-- > 0)
    644             pTlb->aEntries[i * 2].uTag = 0;
    645     }
    646 
    647     pTlb->cTlbNonGlobalLargePageCurLoads    = 0;
    648     pTlb->NonGlobalLargePageRange.uLastTag  = 0;
    649     pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
    650 
    651     if (a_fGlobal)
    652     {
    653         pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
    654         if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
    655         { /* very likely */ }
    656         else
    657         {
    658             pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
    659             pTlb->cTlbRevisionRollovers++;
    660             unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
    661             while (i-- > 0)
    662                 pTlb->aEntries[i * 2 + 1].uTag = 0;
    663         }
    664 
    665         pTlb->cTlbGlobalLargePageCurLoads    = 0;
    666         pTlb->GlobalLargePageRange.uLastTag  = 0;
    667         pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
    668     }
    669 }
    670 #endif
    671 
    672 
    673 /**
    674  * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
    675  */
    676 template<bool a_fGlobal>
    677 DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
    678 {
    679 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    680     Log10(("IEMTlbInvalidateAll\n"));
    681 
    682 # ifdef IEM_WITH_CODE_TLB
    683     pVCpu->iem.s.cbInstrBufTotal = 0;
    684     iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
    685     if (a_fGlobal)
    686         IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
    687     else
    688         IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
    689 # endif
    690 
    691 # ifdef IEM_WITH_DATA_TLB
    692     iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
    693     if (a_fGlobal)
    694         IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
    695     else
    696         IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
    697 # endif
    698 #else
    699     RT_NOREF(pVCpu);
    700 #endif
    701 }
    702 
    703 
    704 /**
    705  * Invalidates the non-global IEM TLB entries.
    706  *
    707  * This is called internally as well as by PGM when moving GC mappings.
    708  *
    709  * @param   pVCpu       The cross context virtual CPU structure of the calling
    710  *                      thread.
    711  */
    712 VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
    713 {
    714     iemTlbInvalidateAll<false>(pVCpu);
    715 }
    716 
    717 
    718 /**
    719  * Invalidates all the IEM TLB entries.
    720  *
    721  * This is called internally as well as by PGM when moving GC mappings.
    722  *
    723  * @param   pVCpu       The cross context virtual CPU structure of the calling
    724  *                      thread.
    725  */
    726 VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
    727 {
    728     iemTlbInvalidateAll<true>(pVCpu);
    729 }
    730 
    731 
    732 /**
    733  * Invalidates a page in the TLBs.
    734  *
    735  * @param   pVCpu       The cross context virtual CPU structure of the calling
    736  *                      thread.
    737  * @param   GCPtr       The address of the page to invalidate
    738  * @thread EMT(pVCpu)
    739  */
    740 VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
    741 {
    742     IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
    743 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    744     Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
    745     GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
    746     Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
    747     uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
    748 
    749 # ifdef IEM_WITH_CODE_TLB
    750     iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
    751 # endif
    752 # ifdef IEM_WITH_DATA_TLB
    753     iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
    754 # endif
    755 #else
    756     NOREF(pVCpu); NOREF(GCPtr);
    757 #endif
    758 }
    759 
    760 
    761 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    762 /**
    763  * Invalidates both TLBs in a slow fashion following a rollover.
    764  *
    765  * Worker for IEMTlbInvalidateAllPhysical,
    766  * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
    767  * iemMemMapJmp and others.
    768  *
    769  * @thread EMT(pVCpu)
    770  */
    771 void iemTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu) RT_NOEXCEPT
    772 {
    773     Log10(("iemTlbInvalidateAllPhysicalSlow\n"));
    774     ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
    775     ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
    776 
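    /* With the physical revision reset, scrub the physical-revision dependent bits and
       the ring-3 mapping pointers from every entry so each one has to be re-validated
       on its next use. */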
    777     unsigned i;
    778 # ifdef IEM_WITH_CODE_TLB
    779     i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
    780     while (i-- > 0)
    781     {
    782         pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3       = NULL;
    783         pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
    784                                                                | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    785     }
    786     pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
    787     pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    788 # endif
    789 # ifdef IEM_WITH_DATA_TLB
    790     i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
    791     while (i-- > 0)
    792     {
    793         pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3       = NULL;
    794         pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
    795                                                                | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    796     }
    797     pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
    798     pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    799 # endif
    800 
    801 }
    802 #endif
    803 
    804 
    805 /**
    806  * Invalidates the host physical aspects of the IEM TLBs.
    807  *
    808  * This is called internally as well as by PGM when moving GC mappings.
    809  *
    810  * @param   pVCpu       The cross context virtual CPU structure of the calling
    811  *                      thread.
    812  * @note    Currently not used.
    813  */
    814 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
    815 {
    816 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    817     /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
    818     Log10(("IEMTlbInvalidateAllPhysical\n"));
    819 
    820 # ifdef IEM_WITH_CODE_TLB
    821     pVCpu->iem.s.cbInstrBufTotal = 0;
    822 # endif
    823     uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
    824     if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
    825     {
    826         pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
    827         pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    828         pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
    829         pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    830     }
    831     else
    832         iemTlbInvalidateAllPhysicalSlow(pVCpu);
    833 #else
    834     NOREF(pVCpu);
    835 #endif
    836 }
    837 
    838 
    839 /**
    840  * Invalidates the host physical aspects of the IEM TLBs.
    841  *
    842  * This is called internally as well as by PGM when moving GC mappings.
    843  *
    844  * @param   pVM         The cross context VM structure.
    845  * @param   idCpuCaller The ID of the calling EMT if available to the caller,
    846  *                      otherwise NIL_VMCPUID.
    847  * @param   enmReason   The reason we're called.
    848  *
    849  * @remarks Caller holds the PGM lock.
    850  */
    851 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
    852 {
    853 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    854     PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
    855     if (pVCpuCaller)
    856         VMCPU_ASSERT_EMT(pVCpuCaller);
    857     Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
    858 
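    /* Bump each VCPU's physical TLB revision with compare-exchange so we do not race
       the owning EMT; on overflow a remote CPU is handed the smallest valid revision
       value, leaving the full (slow) scrub to that CPU itself, while the calling CPU
       does the slow flush right here. */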
    859     VMCC_FOR_EACH_VMCPU(pVM)
    860     {
    861 # ifdef IEM_WITH_CODE_TLB
    862         if (pVCpuCaller == pVCpu)
    863             pVCpu->iem.s.cbInstrBufTotal = 0;
    864 # endif
    865 
    866         uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
    867         uint64_t       uTlbPhysRevNew  = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
    868         if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
    869         { /* likely */}
    870         else if (pVCpuCaller != pVCpu)
    871             uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
    872         else
    873         {
    874             iemTlbInvalidateAllPhysicalSlow(pVCpu);
    875             continue;
    876         }
    877         if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
    878             pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    879 
    880         if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
    881             pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    882     }
    883     VMCC_FOR_EACH_VMCPU_END(pVM);
    884 
    885 #else
    886     RT_NOREF(pVM, idCpuCaller, enmReason);
    887 #endif
    888 }
    889 198
    890 199
     
    1828 1137 #endif /* IEM_WITH_SETJMP */
    1829 1138
    1830 
    1831 
    1832 /** @name   Register Access.
    1833  * @{
    1834  */
    1835 
    1836 /**
    1837  * Adds a 8-bit signed jump offset to RIP/EIP/IP.
    1838  *
    1839  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    1840  * segment limit.
    1841  *
    1842  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1843  * @param   cbInstr             Instruction size.
    1844  * @param   offNextInstr        The offset of the next instruction.
    1845  * @param   enmEffOpSize        Effective operand size.
    1846  */
    1847 VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
    1848                                                         IEMMODE enmEffOpSize) RT_NOEXCEPT
    1849 {
    1850     switch (enmEffOpSize)
    1851     {
    1852         case IEMMODE_16BIT:
    1853         {
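            /* Note: the uint16_t result wraps at 64K, matching real 16-bit IP arithmetic. */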
    1854             uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
    1855             if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
    1856                           || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
    1857                 pVCpu->cpum.GstCtx.rip = uNewIp;
    1858             else
    1859                 return iemRaiseGeneralProtectionFault0(pVCpu);
    1860             break;
    1861         }
    1862 
    1863         case IEMMODE_32BIT:
    1864         {
    1865             Assert(!IEM_IS_64BIT_CODE(pVCpu));
    1866             Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
    1867 
    1868             uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
    1869             if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
    1870                 pVCpu->cpum.GstCtx.rip = uNewEip;
    1871             else
    1872                 return iemRaiseGeneralProtectionFault0(pVCpu);
    1873             break;
    1874         }
    1875 
    1876         case IEMMODE_64BIT:
    1877         {
    1878             Assert(IEM_IS_64BIT_CODE(pVCpu));
    1879 
    1880             uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    1881             if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
    1882                 pVCpu->cpum.GstCtx.rip = uNewRip;
    1883             else
    1884                 return iemRaiseGeneralProtectionFault0(pVCpu);
    1885             break;
    1886         }
    1887 
    1888         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    1889     }
    1890 
    1891 #ifndef IEM_WITH_CODE_TLB
    1892     /* Flush the prefetch buffer. */
    1893     pVCpu->iem.s.cbOpcode = cbInstr;
    1894 #endif
    1895 
    1896     /*
    1897      * Clear RF and finish the instruction (maybe raise #DB).
    1898      */
    1899     return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
    1900 }
    1901 
    1902 
    1903 /**
    1904  * Adds a 16-bit signed jump offset to RIP/EIP/IP.
    1905  *
    1906  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    1907  * segment limit.
    1908  *
    1909  * @returns Strict VBox status code.
    1910  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1911  * @param   cbInstr             Instruction size.
    1912  * @param   offNextInstr        The offset of the next instruction.
    1913  */
    1914 VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
    1915 {
    1916     Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
    1917 
    1918     uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    1919     if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
    1920                   || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
    1921         pVCpu->cpum.GstCtx.rip = uNewIp;
    1922     else
    1923         return iemRaiseGeneralProtectionFault0(pVCpu);
    1924 
    1925 #ifndef IEM_WITH_CODE_TLB
    1926     /* Flush the prefetch buffer. */
    1927     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    1928 #endif
    1929 
    1930     /*
    1931      * Clear RF and finish the instruction (maybe raise #DB).
    1932      */
    1933     return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
    1934 }
    1935 
    1936 
    1937 /**
    1938  * Adds a 32-bit signed jump offset to RIP/EIP/IP.
    1939  *
    1940  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    1941  * segment limit.
    1942  *
    1943  * @returns Strict VBox status code.
    1944  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1945  * @param   cbInstr             Instruction size.
    1946  * @param   offNextInstr        The offset of the next instruction.
    1947  * @param   enmEffOpSize        Effective operand size.
    1948  */
    1949 VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
    1950                                                          IEMMODE enmEffOpSize) RT_NOEXCEPT
    1951 {
    1952     if (enmEffOpSize == IEMMODE_32BIT)
    1953     {
    1954         Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
    1955 
    1956         uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
    1957         if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
    1958             pVCpu->cpum.GstCtx.rip = uNewEip;
    1959         else
    1960             return iemRaiseGeneralProtectionFault0(pVCpu);
    1961     }
    1962     else
    1963     {
    1964         Assert(enmEffOpSize == IEMMODE_64BIT);
    1965 
    1966         uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    1967         if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
    1968             pVCpu->cpum.GstCtx.rip = uNewRip;
    1969         else
    1970             return iemRaiseGeneralProtectionFault0(pVCpu);
    1971     }
    1972 
    1973 #ifndef IEM_WITH_CODE_TLB
    1974     /* Flush the prefetch buffer. */
    1975     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    1976 #endif
    1977 
    1978     /*
    1979      * Clear RF and finish the instruction (maybe raise #DB).
    1980      */
    1981     return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
    1982 }
    1983 
    1984 /** @}  */
    1985 
    1986 
    1987 /** @name   Memory access.
    1988  *
    1989  * @{
    1990  */
    1991 
    1992 #undef  LOG_GROUP
    1993 #define LOG_GROUP LOG_GROUP_IEM_MEM
    1994 
    1995 #if 0 /*unused*/
    1996 /**
    1997  * Looks up a memory mapping entry.
    1998  *
    1999  * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
    2000  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    2001  * @param   pvMem           The memory address.
    2002  * @param   fAccess         The access to match.
    2003  */
    2004 DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
    2005 {
    2006     Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
    2007     fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
    2008     if (   pVCpu->iem.s.aMemMappings[0].pv == pvMem
    2009         && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    2010         return 0;
    2011     if (   pVCpu->iem.s.aMemMappings[1].pv == pvMem
    2012         && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    2013         return 1;
    2014     if (   pVCpu->iem.s.aMemMappings[2].pv == pvMem
    2015         && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    2016         return 2;
    2017     return VERR_NOT_FOUND;
    2018 }
    2019 #endif
    2020 
    2021 /**
    2022  * Finds a free memmap entry when using iNextMapping doesn't work.
    2023  *
    2024  * @returns Memory mapping index, 1024 on failure.
    2025  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2026  */
    2027 static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
    2028 {
    2029     /*
    2030      * The easy case.
    2031      */
    2032     if (pVCpu->iem.s.cActiveMappings == 0)
    2033     {
    2034         pVCpu->iem.s.iNextMapping = 1;
    2035         return 0;
    2036     }
    2037 
    2038     /* There should be enough mappings for all instructions. */
    2039     AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
    2040 
    2041     for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
    2042         if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
    2043             return i;
    2044 
    2045     AssertFailedReturn(1024);
    2046 }
    2047 
    2048 
    2049 /**
    2050  * Commits a bounce buffer that needs writing back and unmaps it.
    2051  *
    2052  * @returns Strict VBox status code.
    2053  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    2054  * @param   iMemMap         The index of the buffer to commit.
    2055  * @param   fPostponeFail   Whether we can postpone writer failures to ring-3.
    2056  *                          Always false in ring-3, obviously.
    2057  */
    2058 static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
    2059 {
    2060     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    2061     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    2062 #ifdef IN_RING3
    2063     Assert(!fPostponeFail);
    2064     RT_NOREF_PV(fPostponeFail);
    2065 #endif
    2066 
    2067     /*
    2068      * Do the writing.
    2069      */
    2070     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2071     if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
    2072     {
    2073         uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    2074         uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    2075         uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
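        /* The buffered range may be split in two (cbFirst + cbSecond, e.g. when the
           access crossed a page boundary), so the write-back is done in up to two
           parts, with informational statuses merged via iemSetPassUpStatus. */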
    2076         if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    2077         {
    2078             /*
    2079              * Carefully and efficiently dealing with access handler return
    2080              * codes makes this a little bloated.
    2081              */
    2082             VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
    2083                                                  pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    2084                                                  pbBuf,
    2085                                                  cbFirst,
    2086                                                  PGMACCESSORIGIN_IEM);
    2087             if (rcStrict == VINF_SUCCESS)
    2088             {
    2089                 if (cbSecond)
    2090                 {
    2091                     rcStrict = PGMPhysWrite(pVM,
    2092                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    2093                                             pbBuf + cbFirst,
    2094                                             cbSecond,
    2095                                             PGMACCESSORIGIN_IEM);
    2096                     if (rcStrict == VINF_SUCCESS)
    2097                     { /* nothing */ }
    2098                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2099                     {
    2100                         LogEx(LOG_GROUP_IEM,
    2101                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
    2102                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2103                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2104                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2105                     }
    2106 #ifndef IN_RING3
    2107                     else if (fPostponeFail)
    2108                     {
    2109                         LogEx(LOG_GROUP_IEM,
    2110                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    2111                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2112                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2113                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    2114                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    2115                         return iemSetPassUpStatus(pVCpu, rcStrict);
    2116                     }
    2117 #endif
    2118                     else
    2119                     {
    2120                         LogEx(LOG_GROUP_IEM,
    2121                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    2122                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2123                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2124                         return rcStrict;
    2125                     }
    2126                 }
    2127             }
    2128             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2129             {
    2130                 if (!cbSecond)
    2131                 {
    2132                     LogEx(LOG_GROUP_IEM,
    2133                           ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
    2134                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    2135                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2136                 }
    2137                 else
    2138                 {
    2139                     VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
    2140                                                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    2141                                                           pbBuf + cbFirst,
    2142                                                           cbSecond,
    2143                                                           PGMACCESSORIGIN_IEM);
    2144                     if (rcStrict2 == VINF_SUCCESS)
    2145                     {
    2146                         LogEx(LOG_GROUP_IEM,
    2147                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
    2148                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    2149                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    2150                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2151                     }
    2152                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    2153                     {
    2154                         LogEx(LOG_GROUP_IEM,
    2155                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
    2156                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    2157                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    2158                         PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    2159                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2160                     }
    2161 #ifndef IN_RING3
    2162                     else if (fPostponeFail)
    2163                     {
    2164                         LogEx(LOG_GROUP_IEM,
    2165                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    2166                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2167                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2168                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    2169                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    2170                         return iemSetPassUpStatus(pVCpu, rcStrict);
    2171                     }
    2172 #endif
    2173                     else
    2174                     {
    2175                         LogEx(LOG_GROUP_IEM,
    2176                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    2177                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    2178                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    2179                         return rcStrict2;
    2180                     }
    2181                 }
    2182             }
    2183 #ifndef IN_RING3
    2184             else if (fPostponeFail)
    2185             {
    2186                 LogEx(LOG_GROUP_IEM,
    2187                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    2188                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2189                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2190                 if (!cbSecond)
    2191                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
    2192                 else
    2193                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
    2194                 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    2195                 return iemSetPassUpStatus(pVCpu, rcStrict);
    2196             }
    2197 #endif
    2198             else
    2199             {
    2200                 LogEx(LOG_GROUP_IEM,
    2201                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    2202                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    2203                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    2204                 return rcStrict;
    2205             }
    2206         }
    2207         else
    2208         {
    2209             /*
    2210              * No access handlers, much simpler.
    2211              */
    2212             int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
    2213             if (RT_SUCCESS(rc))
    2214             {
    2215                 if (cbSecond)
    2216                 {
    2217                     rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
    2218                     if (RT_SUCCESS(rc))
    2219                     { /* likely */ }
    2220                     else
    2221                     {
    2222                         LogEx(LOG_GROUP_IEM,
    2223                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    2224                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2225                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
    2226                         return rc;
    2227                     }
    2228                 }
    2229             }
    2230             else
    2231             {
    2232                 LogEx(LOG_GROUP_IEM,
    2233                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    2234                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
    2235                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    2236                 return rc;
    2237             }
    2238         }
    2239     }
    2240 
    2241 #if defined(IEM_LOG_MEMORY_WRITES)
    2242     Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    2243           RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
    2244     if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
    2245         Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    2246               RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
    2247               &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
    2248 
    2249     size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    2250     g_cbIemWrote = cbWrote;
    2251     memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
    2252 #endif
    2253 
    2254     /*
    2255      * Free the mapping entry.
    2256      */
    2257     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2258     Assert(pVCpu->iem.s.cActiveMappings != 0);
    2259     pVCpu->iem.s.cActiveMappings--;
    2260     return VINF_SUCCESS;
    2261 }
    2262 
    2263 
    2264 /**
    2265  * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
    2266  * @todo duplicated
    2267  */
    2268 DECL_FORCE_INLINE(uint32_t)
    2269 iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
    2270 {
    2271     bool const  fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
    2272     if (fAccess & IEM_ACCESS_TYPE_WRITE)
    2273         return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    2274     return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    2275 }
    2276 
    2277 
    2278 /**
    2279  * iemMemMap worker that deals with a request crossing pages.
    2280  */
    2281 VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
    2282                                             size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) RT_NOEXCEPT
    2283 {
    2284     STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
    2285     Assert(cbMem <= GUEST_PAGE_SIZE);
    2286 
    2287     /*
    2288      * Do the address translations.
    2289      */
    2290     uint32_t const cbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    2291     RTGCPHYS GCPhysFirst;
    2292     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
    2293     if (rcStrict != VINF_SUCCESS)
    2294         return rcStrict;
    2295     Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
    2296 
    2297     uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
    2298     RTGCPHYS GCPhysSecond;
    2299     rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
    2300                                                  cbSecondPage, fAccess, &GCPhysSecond);
    2301     if (rcStrict != VINF_SUCCESS)
    2302         return rcStrict;
    2303     Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
    2304     GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
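    /* Worked example of the split computed above: with GUEST_PAGE_SIZE = 0x1000,
       an 8-byte access at GCPtrFirst = 0x7ffe gives cbFirstPage = 0x1000 - 0xffe = 2
       bytes on the first page and cbSecondPage = 8 - 2 = 6 bytes on the second,
       so GCPhysSecond always refers to the start of a page. */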
    2305 
    2306     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2307 
    2308     /*
    2309      * Check for data breakpoints.
    2310      */
    2311     if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
    2312     { /* likely */ }
    2313     else
    2314     {
    2315         uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
    2316         fDataBps         |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
    2317                                                       cbSecondPage, fAccess);
    2318         pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
    2319         if (fDataBps > 1)
    2320             LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
    2321                                   fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    2322     }
    2323 
    2324     /*
    2325      * Read in the current memory content if it's a read, execute or partial
    2326      * write access.
    2327      */
    2328     uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    2329 
    2330     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    2331     {
    2332         if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    2333         {
    2334             /*
     2335              * Must carefully deal with access handler status codes here,
     2336              * which makes the code a bit bloated.
    2337              */
    2338             rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
    2339             if (rcStrict == VINF_SUCCESS)
    2340             {
    2341                 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    2342                 if (rcStrict == VINF_SUCCESS)
    2343                 { /*likely */ }
    2344                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2345                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2346                 else
    2347                 {
     2348                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
    2349                                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2350                     return rcStrict;
    2351                 }
    2352             }
    2353             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2354             {
    2355                 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    2356                 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    2357                 {
    2358                     PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    2359                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2360                 }
    2361                 else
    2362                 {
    2363                     LogEx(LOG_GROUP_IEM,
    2364                           ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
     2365                            GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
    2366                     return rcStrict2;
    2367                 }
    2368             }
    2369             else
    2370             {
     2371                 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    2372                                       GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    2373                 return rcStrict;
    2374             }
    2375         }
    2376         else
    2377         {
    2378             /*
     2379              * No informational status codes here, much more straightforward.
    2380              */
    2381             int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
    2382             if (RT_SUCCESS(rc))
    2383             {
    2384                 Assert(rc == VINF_SUCCESS);
    2385                 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
    2386                 if (RT_SUCCESS(rc))
    2387                     Assert(rc == VINF_SUCCESS);
    2388                 else
    2389                 {
    2390                     LogEx(LOG_GROUP_IEM,
    2391                           ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
    2392                     return rc;
    2393                 }
    2394             }
    2395             else
    2396             {
    2397                 LogEx(LOG_GROUP_IEM,
    2398                       ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
    2399                 return rc;
    2400             }
    2401         }
    2402     }
    2403 #ifdef VBOX_STRICT
    2404     else
    2405         memset(pbBuf, 0xcc, cbMem);
    2406     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    2407         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    2408 #endif
    2409     AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
    2410 
    2411     /*
    2412      * Commit the bounce buffer entry.
    2413      */
    2414     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    2415     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = GCPhysSecond;
    2416     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbFirstPage;
    2417     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = (uint16_t)cbSecondPage;
    2418     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = false;
    2419     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    2420     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    2421     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    2422     pVCpu->iem.s.cActiveMappings++;
    2423 
    2424     *ppvMem = pbBuf;
    2425     *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    2426     return VINF_SUCCESS;
    2427 }
    2428 
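/*
 * Illustration of the bUnmapInfo cookie produced above (a sketch mirroring the
 * encode in iemMemBounceBufferMapCrossPage and the checks in iemMemCommitAndUnmap
 * below): bits 0-2 hold the mapping slot, bit 3 is a validity marker, and
 * bits 4-7 hold the IEM_ACCESS_TYPE_MASK part of fAccess.
 *
 *     uint8_t const   bUnmapInfo = (uint8_t)(iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4));
 *     uintptr_t const iMemMapDec = bUnmapInfo & 0x7;            // mapping slot
 *     bool const      fValid     = RT_BOOL(bUnmapInfo & 0x08);  // validity marker
 *     uint32_t const  fAccessTyp = (uint32_t)bUnmapInfo >> 4;   // access type bits
 */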
    2429 
    2430 /**
     2431  * iemMemMap worker that deals with iemMemPageMap failures.
    2432  */
    2433 VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
    2434                                        RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) RT_NOEXCEPT
    2435 {
    2436     STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
    2437 
    2438     /*
    2439      * Filter out conditions we can handle and the ones which shouldn't happen.
    2440      */
    2441     if (   rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
    2442         && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
    2443         && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
    2444     {
    2445         AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
    2446         return rcMap;
    2447     }
    2448     pVCpu->iem.s.cPotentialExits++;
    2449 
    2450     /*
    2451      * Read in the current memory content if it's a read, execute or partial
    2452      * write access.
    2453      */
    2454     uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    2455     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    2456     {
    2457         if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
    2458             memset(pbBuf, 0xff, cbMem);
    2459         else
    2460         {
    2461             int rc;
    2462             if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    2463             {
    2464                 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
    2465                 if (rcStrict == VINF_SUCCESS)
    2466                 { /* nothing */ }
    2467                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2468                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2469                 else
    2470                 {
    2471                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    2472                                           GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    2473                     return rcStrict;
    2474                 }
    2475             }
    2476             else
    2477             {
    2478                 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
    2479                 if (RT_SUCCESS(rc))
    2480                 { /* likely */ }
    2481                 else
    2482                 {
    2483                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    2484                                           GCPhysFirst, rc));
    2485                     return rc;
    2486                 }
    2487             }
    2488         }
    2489     }
    2490 #ifdef VBOX_STRICT
    2491     else
    2492         memset(pbBuf, 0xcc, cbMem);
    2495     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    2496         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    2497 #endif
    2498 
    2499     /*
    2500      * Commit the bounce buffer entry.
    2501      */
    2502     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    2503     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = NIL_RTGCPHYS;
    2504     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbMem;
    2505     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = 0;
    2506     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
    2507     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    2508     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    2509     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    2510     pVCpu->iem.s.cActiveMappings++;
    2511 
    2512     *ppvMem = pbBuf;
    2513     *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    2514     return VINF_SUCCESS;
    2515 }
    2516 
    2517 
    2518 
    2519 /**
    2520  * Commits the guest memory if bounce buffered and unmaps it.
    2521  *
    2522  * @returns Strict VBox status code.
    2523  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2524  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    2525  */
    2526 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    2527 {
    2528     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    2529     AssertMsgReturn(   (bUnmapInfo & 0x08)
    2530                     && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    2531                     && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
    2532                     ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
    2533                     VERR_NOT_FOUND);
    2534 
    2535     /* If it's bounce buffered, we may need to write back the buffer. */
    2536     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    2537     {
    2538         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    2539             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    2540     }
    2541     /* Otherwise unlock it. */
    2542     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    2543         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    2544 
    2545     /* Free the entry. */
    2546     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2547     Assert(pVCpu->iem.s.cActiveMappings != 0);
    2548     pVCpu->iem.s.cActiveMappings--;
    2549     return VINF_SUCCESS;
    2550 }
    2551 
    2552 
    2553 /**
    2554  * Rolls back the guest memory (conceptually only) and unmaps it.
    2555  *
    2556  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2557  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    2558  */
    2559 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    2560 {
    2561     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    2562     AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
    2563                         && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    2564                         &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    2565                            == ((unsigned)bUnmapInfo >> 4),
    2566                         ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    2567 
    2568     /* Unlock it if necessary. */
    2569     if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    2570         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    2571 
    2572     /* Free the entry. */
    2573     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2574     Assert(pVCpu->iem.s.cActiveMappings != 0);
    2575     pVCpu->iem.s.cActiveMappings--;
    2576 }
    2577 
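/*
 * Typical usage pattern (a sketch; the exact iemMemMap argument list is omitted
 * and marked with '...'): map guest memory, update it through the returned
 * pointer, then either commit or roll back using the bUnmapInfo cookie.
 *
 *     void    *pvMem      = NULL;
 *     uint8_t  bUnmapInfo = 0;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem, &bUnmapInfo, ...);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         // ... modify *pvMem ...
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);  // write back / unlock
 *     }
 *     // a failed instruction uses iemMemRollbackAndUnmap(pVCpu, bUnmapInfo) instead
 */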
    2578 #ifdef IEM_WITH_SETJMP
    2579 
    2580 /**
    2581  * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
    2582  *
    2583  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2584  * @param   bUnmapInfo          Unmap info set by iemMemMap, identifying the
     2585  *                              mapping and the access type.
    2586  */
    2587 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    2588 {
    2589     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    2590     AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
    2591                         && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    2592                         &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    2593                            == ((unsigned)bUnmapInfo >> 4),
    2594                         ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    2595 
    2596     /* If it's bounce buffered, we may need to write back the buffer. */
    2597     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    2598     {
    2599         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    2600         {
    2601             VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    2602             if (rcStrict == VINF_SUCCESS)
    2603                 return;
    2604             IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    2605         }
    2606     }
    2607     /* Otherwise unlock it. */
    2608     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    2609         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    2610 
    2611     /* Free the entry. */
    2612     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2613     Assert(pVCpu->iem.s.cActiveMappings != 0);
    2614     pVCpu->iem.s.cActiveMappings--;
    2615 }
    2616 
    2617 
    2618 /** Fallback for iemMemCommitAndUnmapRwJmp.  */
    2619 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    2620 {
    2621     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    2622     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    2623 }
    2624 
    2625 
    2626 /** Fallback for iemMemCommitAndUnmapAtJmp.  */
    2627 void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    2628 {
    2629     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    2630     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    2631 }
    2632 
    2633 
    2634 /** Fallback for iemMemCommitAndUnmapWoJmp.  */
    2635 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    2636 {
    2637     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    2638     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    2639 }
    2640 
    2641 
    2642 /** Fallback for iemMemCommitAndUnmapRoJmp.  */
    2643 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    2644 {
    2645     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
    2646     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    2647 }
    2648 
    2649 
    2650 /** Fallback for iemMemRollbackAndUnmapWo.  */
    2651 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    2652 {
    2653     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    2654     iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
    2655 }
    2656 
    2657 #endif /* IEM_WITH_SETJMP */
    2658 
    2659 #ifndef IN_RING3
    2660 /**
     2661  * Commits the guest memory if bounce buffered and unmaps it; if any bounce
     2662  * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
    2663  *
    2664  * Allows the instruction to be completed and retired, while the IEM user will
    2665  * return to ring-3 immediately afterwards and do the postponed writes there.
    2666  *
    2667  * @returns VBox status code (no strict statuses).  Caller must check
    2668  *          VMCPU_FF_IEM before repeating string instructions and similar stuff.
    2669  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2670  * @param   bUnmapInfo          Unmap info set by iemMemMap, identifying the
     2671  *                              mapping and the access type.
    2672  */
    2673 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    2674 {
    2675     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    2676     AssertMsgReturn(   (bUnmapInfo & 0x08)
    2677                     && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    2678                     &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    2679                        == ((unsigned)bUnmapInfo >> 4),
    2680                     ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
    2681                     VERR_NOT_FOUND);
    2682 
    2683     /* If it's bounce buffered, we may need to write back the buffer. */
    2684     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    2685     {
    2686         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    2687             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
    2688     }
    2689     /* Otherwise unlock it. */
    2690     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    2691         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    2692 
    2693     /* Free the entry. */
    2694     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2695     Assert(pVCpu->iem.s.cActiveMappings != 0);
    2696     pVCpu->iem.s.cActiveMappings--;
    2697     return VINF_SUCCESS;
    2698 }
    2699 #endif
    2700 
    2701 
    2702 /**
     2703  * Rolls back mappings, releasing page locks and such.
    2704  *
    2705  * The caller shall only call this after checking cActiveMappings.
    2706  *
    2707  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    2708  */
    2709 void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
    2710 {
    2711     Assert(pVCpu->iem.s.cActiveMappings > 0);
    2712 
    2713     uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    2714     while (iMemMap-- > 0)
    2715     {
    2716         uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
    2717         if (fAccess != IEM_ACCESS_INVALID)
    2718         {
    2719             AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
    2720             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2721             if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
    2722                 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    2723             AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
    2724                       ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
    2725                        iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
    2726                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
    2727             pVCpu->iem.s.cActiveMappings--;
    2728         }
    2729     }
    2730 }
    2731 
    2732 #undef  LOG_GROUP
    2733 #define LOG_GROUP LOG_GROUP_IEM
    2734 
    2735 /** @} */
    2736 
    2737 
    2738 #ifdef LOG_ENABLED
    2739 /**
    2740  * Logs the current instruction.
    2741  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    2742  * @param   fSameCtx    Set if we have the same context information as the VMM,
    2743  *                      clear if we may have already executed an instruction in
    2744  *                      our debug context. When clear, we assume IEMCPU holds
    2745  *                      valid CPU mode info.
    2746  *
    2747  *                      The @a fSameCtx parameter is now misleading and obsolete.
    2748  * @param   pszFunction The IEM function doing the execution.
    2749  */
    2750 static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
    2751 {
    2752 # ifdef IN_RING3
    2753     if (LogIs2Enabled())
    2754     {
    2755         char     szInstr[256];
    2756         uint32_t cbInstr = 0;
    2757         if (fSameCtx)
    2758             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
    2759                                DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
    2760                                szInstr, sizeof(szInstr), &cbInstr);
    2761         else
    2762         {
    2763             uint32_t fFlags = 0;
    2764             switch (IEM_GET_CPU_MODE(pVCpu))
    2765             {
    2766                 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
    2767                 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
    2768                 case IEMMODE_16BIT:
    2769                     if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
    2770                         fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
    2771                     else
    2772                         fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
    2773                     break;
    2774             }
    2775             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
    2776                                szInstr, sizeof(szInstr), &cbInstr);
    2777         }
    2778 
    2779         PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    2780         Log2(("**** %s fExec=%x\n"
    2781               " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
    2782               " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
    2783               " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
    2784               " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
    2785               " %s\n"
    2786               , pszFunction, pVCpu->iem.s.fExec,
    2787               pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
    2788               pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
    2789               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
    2790               pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
    2791               pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
    2792               szInstr));
    2793 
    2794         /* This stuff sucks atm. as it fills the log with MSRs. */
    2795         //if (LogIs3Enabled())
    2796         //    DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
    2797     }
    2798     else
    2799 # endif
    2800         LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
    2801                  pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
    2802     RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
    2803 }
    2804 #endif /* LOG_ENABLED */
    2805 
    2806 
    2807 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2808 /**
    2809  * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
    2810  * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
    2811  *
    2812  * @returns Modified rcStrict.
    2813  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    2814  * @param   rcStrict    The instruction execution status.
    2815  */
    2816 static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
    2817 {
    2818     Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
    2819     if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
    2820     {
    2821         /* VMX preemption timer takes priority over NMI-window exits. */
    2822         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
    2823         {
    2824             rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
    2825             Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
    2826         }
    2827         /*
    2828          * Check remaining intercepts.
    2829          *
    2830          * NMI-window and Interrupt-window VM-exits.
    2831          * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
    2832          * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
    2833          *
    2834          * See Intel spec. 26.7.6 "NMI-Window Exiting".
    2835          * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
    2836          */
    2837         else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
    2838                  && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    2839                  && !TRPMHasTrap(pVCpu))
    2840         {
    2841             Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
    2842             if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
    2843                 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
    2844             {
    2845                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
    2846                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
    2847             }
    2848             else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
    2849                      && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
    2850             {
    2851                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
    2852                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
    2853             }
    2854         }
    2855     }
    2856     /* TPR-below threshold/APIC write has the highest priority. */
    2857     else  if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
    2858     {
    2859         rcStrict = iemVmxApicWriteEmulation(pVCpu);
    2860         Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    2861         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
    2862     }
    2863     /* MTF takes priority over VMX-preemption timer. */
    2864     else
    2865     {
    2866         rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
    2867         Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    2868         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
    2869     }
    2870     return rcStrict;
    2871 }
    2872 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    2873 
    2874 
    2875 /**
    2876  * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,
    2877  * IEMExecOneBypass and friends.
    2878  *
    2879  * Similar code is found in IEMExecLots.
    2880  *
    2881  * @return  Strict VBox status code.
    2882  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    2883  * @param   fExecuteInhibit     If set, execute the instruction following CLI,
    2884  *                      POP SS and MOV SS,GR.
    2885  * @param   pszFunction The calling function name.
    2886  */
    2887 DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
    2888 {
    2889     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    2890     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    2891     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    2892     RT_NOREF_PV(pszFunction);
    2893 
    2894 #ifdef IEM_WITH_SETJMP
    2895     VBOXSTRICTRC rcStrict;
    2896     IEM_TRY_SETJMP(pVCpu, rcStrict)
    2897     {
    2898         uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    2899         rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    2900     }
    2901     IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    2902     {
    2903         pVCpu->iem.s.cLongJumps++;
    2904     }
    2905     IEM_CATCH_LONGJMP_END(pVCpu);
    2906 #else
    2907     uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    2908     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    2909 #endif
    2910     if (rcStrict == VINF_SUCCESS)
    2911         pVCpu->iem.s.cInstructions++;
    2912     if (pVCpu->iem.s.cActiveMappings > 0)
    2913     {
    2914         Assert(rcStrict != VINF_SUCCESS);
    2915         iemMemRollback(pVCpu);
    2916     }
    2917     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    2918     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    2919     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    2920 
    2921 //#ifdef DEBUG
    2922 //    AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
    2923 //#endif
    2924 
    2925 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2926     /*
    2927      * Perform any VMX nested-guest instruction boundary actions.
    2928      *
    2929      * If any of these causes a VM-exit, we must skip executing the next
    2930      * instruction (would run into stale page tables). A VM-exit makes sure
    2931      * there is no interrupt-inhibition, so that should ensure we don't go
     2932      * on to execute the next instruction. Clearing fExecuteInhibit is
    2933      * problematic because of the setjmp/longjmp clobbering above.
    2934      */
    2935     if (   !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    2936                                      | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
    2937         || rcStrict != VINF_SUCCESS)
    2938     { /* likely */ }
    2939     else
    2940         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    2941 #endif
    2942 
    2943     /* Execute the next instruction as well if a cli, pop ss or
    2944        mov ss, Gr has just completed successfully. */
    2945     if (   fExecuteInhibit
    2946         && rcStrict == VINF_SUCCESS
    2947         && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    2948     {
    2949         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
    2950         if (rcStrict == VINF_SUCCESS)
    2951         {
    2952 #ifdef LOG_ENABLED
    2953             iemLogCurInstr(pVCpu, false, pszFunction);
    2954 #endif
    2955 #ifdef IEM_WITH_SETJMP
    2956             IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
    2957             {
    2958                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    2959                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    2960             }
    2961             IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    2962             {
    2963                 pVCpu->iem.s.cLongJumps++;
    2964             }
    2965             IEM_CATCH_LONGJMP_END(pVCpu);
    2966 #else
    2967             IEM_OPCODE_GET_FIRST_U8(&b);
    2968             rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    2969 #endif
    2970             if (rcStrict == VINF_SUCCESS)
    2971             {
    2972                 pVCpu->iem.s.cInstructions++;
    2973 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2974                 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    2975                                               | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
    2976                 { /* likely */ }
    2977                 else
    2978                     rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    2979 #endif
    2980             }
    2981             if (pVCpu->iem.s.cActiveMappings > 0)
    2982             {
    2983                 Assert(rcStrict != VINF_SUCCESS);
    2984                 iemMemRollback(pVCpu);
    2985             }
    2986             AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    2987             AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    2988             AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    2989         }
    2990         else if (pVCpu->iem.s.cActiveMappings > 0)
    2991             iemMemRollback(pVCpu);
    2992         /** @todo drop this after we bake this change into RIP advancing. */
    2993         CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
    2994     }
    2995 
    2996     /*
    2997      * Return value fiddling, statistics and sanity assertions.
    2998      */
    2999     rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3000 
    3001     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    3002     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    3003     return rcStrict;
    3004 }
    3005 
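/*
 * Illustrative background for the fExecuteInhibit handling above: a guest stack
 * switch such as
 *
 *     mov  ss, ax
 *     mov  esp, stack_top
 *
 * relies on the one-instruction interrupt inhibition after MOV SS / POP SS,
 * which is why the shadowed instruction is decoded and executed right away when
 * CPUMIsInInterruptShadow() reports the inhibition.
 */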
    3006 
    3007 /**
    3008  * Execute one instruction.
    3009  *
    3010  * @return  Strict VBox status code.
    3011  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    3012  */
    3013 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
    3014 {
     3015     AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
    3016 #ifdef LOG_ENABLED
    3017     iemLogCurInstr(pVCpu, true, "IEMExecOne");
    3018 #endif
    3019 
    3020     /*
    3021      * Do the decoding and emulation.
    3022      */
    3023     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    3024     if (rcStrict == VINF_SUCCESS)
    3025         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
    3026     else if (pVCpu->iem.s.cActiveMappings > 0)
    3027         iemMemRollback(pVCpu);
    3028 
    3029     if (rcStrict != VINF_SUCCESS)
    3030         LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    3031                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    3032     return rcStrict;
    3033 }
    3034 
    3035 
    3036 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    3037                                                         const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    3038 {
    3039     VBOXSTRICTRC rcStrict;
    3040     if (   cbOpcodeBytes
    3041         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    3042     {
    3043         iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
    3044 #ifdef IEM_WITH_CODE_TLB
    3045         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    3046         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    3047         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    3048         pVCpu->iem.s.offCurInstrStart = 0;
    3049         pVCpu->iem.s.offInstrNextByte = 0;
    3050         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    3051 #else
    3052         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    3053         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    3054 #endif
    3055         rcStrict = VINF_SUCCESS;
    3056     }
    3057     else
    3058         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    3059     if (rcStrict == VINF_SUCCESS)
    3060         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
    3061     else if (pVCpu->iem.s.cActiveMappings > 0)
    3062         iemMemRollback(pVCpu);
    3063 
    3064     return rcStrict;
    3065 }
    3066 
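/*
 * Usage sketch (hypothetical caller; the buffer contents are assumed to match
 * the bytes at the current RIP): a caller that has already read the instruction
 * bytes can hand them to IEM and skip the opcode prefetch; otherwise the
 * function behaves like IEMExecOne.
 *
 *     uint8_t abBytes[16];
 *     // ... abBytes filled with the instruction bytes at pVCpu->cpum.GstCtx.rip ...
 *     VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                          abBytes, sizeof(abBytes));
 */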
    3067 
    3068 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)
    3069 {
    3070     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    3071     if (rcStrict == VINF_SUCCESS)
    3072         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypass");
    3073     else if (pVCpu->iem.s.cActiveMappings > 0)
    3074         iemMemRollback(pVCpu);
    3075 
    3076     return rcStrict;
    3077 }
    3078 
    3079 
    3080 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    3081                                                               const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    3082 {
    3083     VBOXSTRICTRC rcStrict;
    3084     if (   cbOpcodeBytes
    3085         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    3086     {
    3087         iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
    3088 #ifdef IEM_WITH_CODE_TLB
    3089         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    3090         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    3091         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    3092         pVCpu->iem.s.offCurInstrStart = 0;
    3093         pVCpu->iem.s.offInstrNextByte = 0;
    3094         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    3095 #else
    3096         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    3097         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    3098 #endif
    3099         rcStrict = VINF_SUCCESS;
    3100     }
    3101     else
    3102         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    3103     if (rcStrict == VINF_SUCCESS)
    3104         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
    3105     else if (pVCpu->iem.s.cActiveMappings > 0)
    3106         iemMemRollback(pVCpu);
    3107 
    3108     return rcStrict;
    3109 }
    3110 
    3111 
    3112 /**
    3113  * For handling split cacheline lock operations when the host has split-lock
    3114  * detection enabled.
    3115  *
    3116  * This will cause the interpreter to disregard the lock prefix and implicit
    3117  * locking (xchg).
    3118  *
    3119  * @returns Strict VBox status code.
    3120  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    3121  */
    3122 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
    3123 {
    3124     /*
    3125      * Do the decoding and emulation.
    3126      */
    3127     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
    3128     if (rcStrict == VINF_SUCCESS)
    3129         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
    3130     else if (pVCpu->iem.s.cActiveMappings > 0)
    3131         iemMemRollback(pVCpu);
    3132 
    3133     if (rcStrict != VINF_SUCCESS)
    3134         LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    3135                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    3136     return rcStrict;
    3137 }
    3138 
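/*
 * Usage sketch (hedged; how the split-lock condition is detected is host and
 * caller specific): when the outer execution loop sees the guest perform a
 * locked access straddling a cache line on a host with split-lock detection,
 * it can emulate that single instruction without the lock semantics:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
 */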
    3139 
    3140 /**
    3141  * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
    3142  * inject a pending TRPM trap.
    3143  */
    3144 VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
    3145 {
    3146     Assert(TRPMHasTrap(pVCpu));
    3147 
    3148     if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    3149         && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    3150     {
    3151         /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
    3152 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3153         bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
    3154         if (fIntrEnabled)
    3155         {
    3156             if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
    3157                 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    3158             else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    3159                 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
    3160             else
    3161             {
    3162                 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
    3163                 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
    3164             }
    3165         }
    3166 #else
    3167         bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    3168 #endif
    3169         if (fIntrEnabled)
    3170         {
    3171             uint8_t     u8TrapNo;
    3172             TRPMEVENT   enmType;
    3173             uint32_t    uErrCode;
    3174             RTGCPTR     uCr2;
    3175             int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
    3176             AssertRC(rc2);
    3177             Assert(enmType == TRPM_HARDWARE_INT);
    3178             VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
    3179 
    3180             TRPMResetTrap(pVCpu);
    3181 
    3182 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3183             /* Injecting an event may cause a VM-exit. */
    3184             if (   rcStrict != VINF_SUCCESS
    3185                 && rcStrict != VINF_IEM_RAISED_XCPT)
    3186                 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3187 #else
    3188             NOREF(rcStrict);
    3189 #endif
    3190         }
    3191     }
    3192 
    3193     return VINF_SUCCESS;
    3194 }
    3195 
    3196 
    3197 VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
    3198 {
    3199     uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
    3200     AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
    3201     Assert(cMaxInstructions > 0);
    3202 
    3203     /*
     3204      * See if there is an interrupt pending in TRPM and inject it if we can.
    3205      */
    3206     /** @todo What if we are injecting an exception and not an interrupt? Is that
    3207      *        possible here? For now we assert it is indeed only an interrupt. */
    3208     if (!TRPMHasTrap(pVCpu))
    3209     { /* likely */ }
    3210     else
    3211     {
    3212         VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
    3213         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    3214         { /*likely */ }
    3215         else
    3216             return rcStrict;
    3217     }
    3218 
    3219     /*
    3220      * Initial decoder init w/ prefetch, then setup setjmp.
    3221      */
    3222     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    3223     if (rcStrict == VINF_SUCCESS)
    3224     {
    3225 #ifdef IEM_WITH_SETJMP
    3226         pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
    3227         IEM_TRY_SETJMP(pVCpu, rcStrict)
    3228 #endif
    3229         {
    3230             /*
     3231              * The run loop.  We limit ourselves to the caller-specified cMaxInstructions.
    3232              */
    3233             uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
    3234             PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    3235             for (;;)
    3236             {
    3237                 /*
    3238                  * Log the state.
    3239                  */
    3240 #ifdef LOG_ENABLED
    3241                 iemLogCurInstr(pVCpu, true, "IEMExecLots");
    3242 #endif
    3243 
    3244                 /*
    3245                  * Do the decoding and emulation.
    3246                  */
    3247                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    3248                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    3249 #ifdef VBOX_STRICT
    3250                 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
    3251 #endif
    3252                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    3253                 {
    3254                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    3255                     pVCpu->iem.s.cInstructions++;
    3256 
    3257 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3258                     /* Perform any VMX nested-guest instruction boundary actions. */
    3259                     uint64_t fCpu = pVCpu->fLocalForcedActions;
    3260                     if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    3261                                   | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
    3262                     { /* likely */ }
    3263                     else
    3264                     {
    3265                         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    3266                         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    3267                             fCpu = pVCpu->fLocalForcedActions;
    3268                         else
    3269                         {
    3270                             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3271                             break;
    3272                         }
    3273                     }
    3274 #endif
    3275                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    3276                     {
    3277 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX
    3278                         uint64_t fCpu = pVCpu->fLocalForcedActions;
    3279 #endif
    3280                         fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    3281                                                       | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    3282                                                       | VMCPU_FF_TLB_FLUSH
    3283                                                       | VMCPU_FF_UNHALT );
    3284 
    3285                         if (RT_LIKELY(   (   !fCpu
    3286                                           || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    3287                                               && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
    3288                                       && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
    3289                         {
    3290                             if (--cMaxInstructionsGccStupidity > 0)
    3291                             {
     3292                                 /* Poll timers every now and then according to the caller's specs. */
    3293                                 if (   (cMaxInstructionsGccStupidity & cPollRate) != 0
    3294                                     || !TMTimerPollBool(pVM, pVCpu))
    3295                                 {
    3296                                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    3297                                     iemReInitDecoder(pVCpu);
    3298                                     continue;
    3299                                 }
    3300                             }
    3301                         }
    3302                     }
    3303                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    3304                 }
    3305                 else if (pVCpu->iem.s.cActiveMappings > 0)
    3306                     iemMemRollback(pVCpu);
    3307                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3308                 break;
    3309             }
    3310         }
    3311 #ifdef IEM_WITH_SETJMP
    3312         IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    3313         {
    3314             if (pVCpu->iem.s.cActiveMappings > 0)
    3315                 iemMemRollback(pVCpu);
    3316 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3317             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3318 # endif
    3319             pVCpu->iem.s.cLongJumps++;
    3320         }
    3321         IEM_CATCH_LONGJMP_END(pVCpu);
    3322 #endif
    3323 
    3324         /*
    3325          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    3326          */
    3327         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    3328         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    3329     }
    3330     else
    3331     {
    3332         if (pVCpu->iem.s.cActiveMappings > 0)
    3333             iemMemRollback(pVCpu);
    3334 
    3335 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3336         /*
    3337          * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
    3338          * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
    3339          */
    3340         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3341 #endif
    3342     }
    3343 
    3344     /*
    3345      * Maybe re-enter raw-mode and log.
    3346      */
    3347     if (rcStrict != VINF_SUCCESS)
    3348         LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    3349                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    3350     if (pcInstructions)
    3351         *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
    3352     return rcStrict;
    3353 }
    3354 
    3355 
    3356 /**
     3357  * Interface used by EMExecuteExec; does exit statistics and limits.
    3358  *
    3359  * @returns Strict VBox status code.
    3360  * @param   pVCpu               The cross context virtual CPU structure.
    3361  * @param   fWillExit           To be defined.
    3362  * @param   cMinInstructions    Minimum number of instructions to execute before checking for FFs.
    3363  * @param   cMaxInstructions    Maximum number of instructions to execute.
    3364  * @param   cMaxInstructionsWithoutExits
    3365  *                              The max number of instructions without exits.
    3366  * @param   pStats              Where to return statistics.
    3367  */
    3368 VMM_INT_DECL(VBOXSTRICTRC)
    3369 IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
    3370                 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
    3371 {
    3372     NOREF(fWillExit); /** @todo define flexible exit crits */
    3373 
    3374     /*
    3375      * Initialize return stats.
    3376      */
    3377     pStats->cInstructions    = 0;
    3378     pStats->cExits           = 0;
    3379     pStats->cMaxExitDistance = 0;
    3380     pStats->cReserved        = 0;
    3381 
    3382     /*
    3383      * Initial decoder init w/ prefetch, then setup setjmp.
    3384      */
    3385     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    3386     if (rcStrict == VINF_SUCCESS)
    3387     {
    3388 #ifdef IEM_WITH_SETJMP
    3389         pVCpu->iem.s.cActiveMappings     = 0; /** @todo wtf?!? */
    3390         IEM_TRY_SETJMP(pVCpu, rcStrict)
    3391 #endif
    3392         {
    3393 #ifdef IN_RING0
    3394             bool const fCheckPreemptionPending   = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    3395 #endif
    3396             uint32_t   cInstructionSinceLastExit = 0;
    3397 
    3398             /*
     3399              * The run loop.  We limit ourselves to the caller-specified instruction limits.
    3400              */
    3401             PVM pVM = pVCpu->CTX_SUFF(pVM);
    3402             for (;;)
    3403             {
    3404                 /*
    3405                  * Log the state.
    3406                  */
    3407 #ifdef LOG_ENABLED
    3408                 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
    3409 #endif
    3410 
    3411                 /*
    3412                  * Do the decoding and emulation.
    3413                  */
    3414                 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
    3415 
    3416                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    3417                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    3418 
    3419                 if (   cPotentialExits != pVCpu->iem.s.cPotentialExits
    3420                     && cInstructionSinceLastExit > 0 /* don't count the first */ )
    3421                 {
    3422                     pStats->cExits += 1;
    3423                     if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
    3424                         pStats->cMaxExitDistance = cInstructionSinceLastExit;
    3425                     cInstructionSinceLastExit = 0;
    3426                 }
    3427 
    3428                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    3429                 {
    3430                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    3431                     pVCpu->iem.s.cInstructions++;
    3432                     pStats->cInstructions++;
    3433                     cInstructionSinceLastExit++;
    3434 
    3435 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3436                     /* Perform any VMX nested-guest instruction boundary actions. */
    3437                     uint64_t fCpu = pVCpu->fLocalForcedActions;
    3438                     if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    3439                                   | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
    3440                     { /* likely */ }
    3441                     else
    3442                     {
    3443                         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    3444                         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    3445                             fCpu = pVCpu->fLocalForcedActions;
    3446                         else
    3447                         {
    3448                             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3449                             break;
    3450                         }
    3451                     }
    3452 #endif
    3453                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    3454                     {
    3455 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX
    3456                         uint64_t fCpu = pVCpu->fLocalForcedActions;
    3457 #endif
    3458                         fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    3459                                                       | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    3460                                                       | VMCPU_FF_TLB_FLUSH
    3461                                                       | VMCPU_FF_UNHALT );
    3462                         if (RT_LIKELY(   (   (   !fCpu
    3463                                               || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    3464                                                   && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
    3465                                           && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
    3466                                       || pStats->cInstructions < cMinInstructions))
    3467                         {
    3468                             if (pStats->cInstructions < cMaxInstructions)
    3469                             {
    3470                                 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
    3471                                 {
    3472 #ifdef IN_RING0
    3473                                     if (   !fCheckPreemptionPending
    3474                                         || !RTThreadPreemptIsPending(NIL_RTTHREAD))
    3475 #endif
    3476                                     {
    3477                                         Assert(pVCpu->iem.s.cActiveMappings == 0);
    3478                                         iemReInitDecoder(pVCpu);
    3479                                         continue;
    3480                                     }
    3481 #ifdef IN_RING0
    3482                                     rcStrict = VINF_EM_RAW_INTERRUPT;
    3483                                     break;
    3484 #endif
    3485                                 }
    3486                             }
    3487                         }
    3488                         Assert(!(fCpu & VMCPU_FF_IEM));
    3489                     }
    3490                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    3491                 }
    3492                 else if (pVCpu->iem.s.cActiveMappings > 0)
    3493                     iemMemRollback(pVCpu);
    3494                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3495                 break;
    3496             }
    3497         }
    3498 #ifdef IEM_WITH_SETJMP
    3499         IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    3500         {
    3501             if (pVCpu->iem.s.cActiveMappings > 0)
    3502                 iemMemRollback(pVCpu);
    3503             pVCpu->iem.s.cLongJumps++;
    3504         }
    3505         IEM_CATCH_LONGJMP_END(pVCpu);
    3506 #endif
    3507 
    3508         /*
    3509          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    3510          */
    3511         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    3512         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    3513     }
    3514     else
    3515     {
    3516         if (pVCpu->iem.s.cActiveMappings > 0)
    3517             iemMemRollback(pVCpu);
    3518 
    3519 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3520         /*
    3521          * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
    3522          * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
    3523          */
    3524         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3525 #endif
    3526     }
    3527 
    3528     /*
    3529      * Log the final state on failure and return.
    3530      */
    3531     if (rcStrict != VINF_SUCCESS)
    3532         LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
    3533                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
    3534                  pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
    3535     return rcStrict;
    3536 }
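
For context, here is a minimal caller sketch; it is not part of this file, and the limit values (1/4096/512) and the surrounding EMT context are illustrative assumptions only. It shows how the statistics structure filled in above might be consumed:

    /* Hedged sketch: drive IEMExecForExits and log the returned statistics.
       pVCpu is assumed to be the calling EMT's cross context CPU structure. */
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 1 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    LogFlow(("ins=%u exits=%u maxdist=%u rc=%Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));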
    3537 
    3538 
    3539 /**
    3540  * Injects a trap, fault, abort, software interrupt or external interrupt.
    3541  *
    3542  * The parameter list matches TRPMQueryTrapAll pretty closely.
    3543  *
    3544  * @returns Strict VBox status code.
    3545  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    3546  * @param   u8TrapNo            The trap number.
    3547  * @param   enmType             What type it is: trap/fault/abort, software
    3548  *                              interrupt, or hardware interrupt.
    3549  * @param   uErrCode            The error code if applicable.
    3550  * @param   uCr2                The CR2 value if applicable.
    3551  * @param   cbInstr             The instruction length (only relevant for
    3552  *                              software interrupts).
    3553  * @note    x86 specific, but difficult to move due to iemInitDecoder dep.
    3554  */
    3555 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
    3556                                          uint8_t cbInstr)
    3557 {
    3558     iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
    3559 #ifdef DBGFTRACE_ENABLED
    3560     RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
    3561                       u8TrapNo, enmType, uErrCode, uCr2);
    3562 #endif
    3563 
    3564     uint32_t fFlags;
    3565     switch (enmType)
    3566     {
    3567         case TRPM_HARDWARE_INT:
    3568             Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
    3569             fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
    3570             uErrCode = uCr2 = 0;
    3571             break;
    3572 
    3573         case TRPM_SOFTWARE_INT:
    3574             Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
    3575             fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
    3576             uErrCode = uCr2 = 0;
    3577             break;
    3578 
    3579         case TRPM_TRAP:
    3580         case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
    3581             Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
    3582             fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
    3583             if (u8TrapNo == X86_XCPT_PF)
    3584                 fFlags |= IEM_XCPT_FLAGS_CR2;
    3585             switch (u8TrapNo)
    3586             {
    3587                 case X86_XCPT_DF:
    3588                 case X86_XCPT_TS:
    3589                 case X86_XCPT_NP:
    3590                 case X86_XCPT_SS:
    3591                 case X86_XCPT_PF:
    3592                 case X86_XCPT_AC:
    3593                 case X86_XCPT_GP:
    3594                     fFlags |= IEM_XCPT_FLAGS_ERR;
    3595                     break;
    3596             }
    3597             break;
    3598 
    3599         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    3600     }
    3601 
    3602     VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
    3603 
    3604     if (pVCpu->iem.s.cActiveMappings > 0)
    3605         iemMemRollback(pVCpu);
    3606 
    3607     return rcStrict;
    3608 }
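
As an illustration only (not from this file), injecting a page fault followed by an external hardware interrupt might look like the following; GCPtrFault is a hypothetical placeholder for the faulting address, and 0x02 is the usual write/not-present page-fault error code:

    /* Hedged example: inject a #PF with error code and CR2, then a hardware interrupt (vector 0x20). */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                                          0x02 /*uErrCode: write, not present*/,
                                          GCPtrFault /*uCr2 - hypothetical variable*/, 0 /*cbInstr*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = IEMInjectTrap(pVCpu, 0x20 /*u8TrapNo*/, TRPM_HARDWARE_INT,
                                 0 /*uErrCode, ignored*/, 0 /*uCr2, ignored*/, 0 /*cbInstr*/);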
    3609 
    3610 
    3611 /**
    3612  * Injects the active TRPM event.
    3613  *
    3614  * @returns Strict VBox status code.
    3615  * @param   pVCpu               The cross context virtual CPU structure.
    3616  */
    3617 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
    3618 {
    3619 #ifndef IEM_IMPLEMENTS_TASKSWITCH
    3620     IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
    3621 #else
    3622     uint8_t     u8TrapNo;
    3623     TRPMEVENT   enmType;
    3624     uint32_t    uErrCode;
    3625     RTGCUINTPTR uCr2;
    3626     uint8_t     cbInstr;
    3627     int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
    3628     if (RT_FAILURE(rc))
    3629         return rc;
    3630 
    3631     /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
    3632      *        ICEBP \#DB injection as a special case. */
    3633     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
    3634 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    3635     if (rcStrict == VINF_SVM_VMEXIT)
    3636         rcStrict = VINF_SUCCESS;
    3637 #endif
    3638 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3639     if (rcStrict == VINF_VMX_VMEXIT)
    3640         rcStrict = VINF_SUCCESS;
    3641 #endif
    3642     /** @todo Are there any other codes that imply the event was successfully
    3643      *        delivered to the guest? See @bugref{6607}.  */
    3644     if (   rcStrict == VINF_SUCCESS
    3645         || rcStrict == VINF_IEM_RAISED_XCPT)
    3646         TRPMResetTrap(pVCpu);
    3647 
    3648     return rcStrict;
    3649 #endif
    3650 }
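
A minimal caller sketch, assuming TRPMHasTrap() is available in the caller's context (an assumption, since it is not referenced in this file): forward a pending TRPM event to IEM and let the function above reset it on successful delivery.

    /* Hedged sketch: only call IEMInjectTrpmEvent when TRPM actually has a pending event. */
    if (TRPMHasTrap(pVCpu))
    {
        VBOXSTRICTRC rcStrict = IEMInjectTrpmEvent(pVCpu);
        Log(("Pending TRPM event injected -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    }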
    3651 
    3652 
    3653 VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
    3654 {
    3655     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    3656     return VERR_NOT_IMPLEMENTED;
    3657 }
    3658 
    3659 
    3660 VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
    3661 {
    3662     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    3663     return VERR_NOT_IMPLEMENTED;
    3664 }
    3665 
    3666 #ifdef IN_RING3
    3667 
    3668 /**
    3669  * Handles the unlikely and probably fatal merge cases.
    3670  *
    3671  * @returns Merged status code.
    3672  * @param   rcStrict        Current EM status code.
    3673  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    3674  *                          with @a rcStrict.
    3675  * @param   iMemMap         The memory mapping index. For error reporting only.
    3676  * @param   pVCpu           The cross context virtual CPU structure of the calling
    3677  *                          thread, for error reporting only.
    3678  */
    3679 DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
    3680                                                           unsigned iMemMap, PVMCPUCC pVCpu)
    3681 {
    3682     if (RT_FAILURE_NP(rcStrict))
    3683         return rcStrict;
    3684 
    3685     if (RT_FAILURE_NP(rcStrictCommit))
    3686         return rcStrictCommit;
    3687 
    3688     if (rcStrict == rcStrictCommit)
    3689         return rcStrictCommit;
    3690 
    3691     AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
    3692                            VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
    3693                            pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
    3694                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
    3695                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    3696     return VERR_IOM_FF_STATUS_IPE;
    3697 }
    3698 
    3699 
    3700 /**
    3701  * Helper for IEMR3ProcessForceFlag.
    3702  *
    3703  * @returns Merged status code.
    3704  * @param   rcStrict        Current EM status code.
    3705  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    3706  *                          with @a rcStrict.
    3707  * @param   iMemMap         The memory mapping index. For error reporting only.
    3708  * @param   pVCpu           The cross context virtual CPU structure of the calling
    3709  *                          thread, for error reporting only.
    3710  */
    3711 DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
    3712 {
    3713     /* Simple. */
    3714     if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
    3715         return rcStrictCommit;
    3716 
    3717     if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
    3718         return rcStrict;
    3719 
    3720     /* EM scheduling status codes. */
    3721     if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
    3722                   && rcStrict <= VINF_EM_LAST))
    3723     {
    3724         if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
    3725                       && rcStrictCommit <= VINF_EM_LAST))
    3726             return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    3727     }
    3728 
    3729     /* Unlikely */
    3730     return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
    3731 }
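
To make the precedence concrete, here is a small illustration (not part of the source) of the two fast paths above; the expected results follow directly from the code, and pVCpu/iMemMap are only used for error reporting:

    /* Hedged illustration of the fast paths in iemR3MergeStatus. */
    VBOXSTRICTRC const rcMerged1 = iemR3MergeStatus(VINF_SUCCESS,      VINF_EM_RAW_TO_R3, 0 /*iMemMap*/, pVCpu); /* -> VINF_EM_RAW_TO_R3 */
    VBOXSTRICTRC const rcMerged2 = iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS,      0 /*iMemMap*/, pVCpu); /* -> VINF_SUCCESS */

When both codes fall in the EM scheduling range, the numerically smaller of the two is kept.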
    3732 
    3733 
    3734 /**
    3735  * Called by force-flag handling code when VMCPU_FF_IEM is set.
    3736  *
    3737  * @returns Merge between @a rcStrict and what the commit operation returned.
    3738  * @param   pVM         The cross context VM structure.
    3739  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    3740  * @param   rcStrict    The status code returned by ring-0 or raw-mode.
    3741  */
    3742 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
    3743 {
    3744     /*
    3745      * Reset the pending commit.
    3746      */
    3747     AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
    3748               & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
    3749               ("%#x %#x %#x\n",
    3750                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    3751     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
    3752 
    3753     /*
    3754      * Commit the pending bounce buffers (usually just one).
    3755      */
    3756     unsigned cBufs = 0;
    3757     unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    3758     while (iMemMap-- > 0)
    3759         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
    3760         {
    3761             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    3762             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    3763             Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
    3764 
    3765             uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    3766             uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    3767             uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    3768 
    3769             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
    3770             {
    3771                 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
    3772                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    3773                                                             pbBuf,
    3774                                                             cbFirst,
    3775                                                             PGMACCESSORIGIN_IEM);
    3776                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
    3777                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
    3778                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    3779                      VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
    3780             }
    3781 
    3782             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
    3783             {
    3784                 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
    3785                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    3786                                                             pbBuf + cbFirst,
    3787                                                             cbSecond,
    3788                                                             PGMACCESSORIGIN_IEM);
    3789                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
    3790                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
    3791                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
    3792                      VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
    3793             }
    3794             cBufs++;
    3795             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    3796         }
    3797 
    3798     AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
    3799               ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
    3800                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    3801     pVCpu->iem.s.cActiveMappings = 0;
    3802     return rcStrict;
    3803 }
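
For context, a minimal sketch (an assumption, not taken from this changeset) of how a ring-3 caller might react to the force flag and invoke the commit above after returning from ring-0 execution:

    /* Hedged sketch: commit pending IEM bounce-buffer writes when VMCPU_FF_IEM is set. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);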
    3804 
    3805 #endif /* IN_RING3 */
    3806 