VirtualBox

Changeset 108247 in vbox for trunk/src/VBox/VMM


Timestamp: Feb 17, 2025 12:28:23 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 167568
Message: VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531

Location: trunk/src/VBox/VMM
Files: 2 edited, 1 copied

  • trunk/src/VBox/VMM/Makefile.kmk

    r108245 r108247  
    195195        VMMAll/HMVMXAll.cpp \
    196196        VMMAll/IEMAll.cpp \
     197        VMMAll/IEMAllTlb.cpp \
    197198        VMMAll/target-x86/IEMAll-x86.cpp \
    198199        VMMAll/target-x86/IEMAllExec-x86.cpp \
     
    362363if "$(USERNAME)" == "bird" && "$(KBUILD_TARGET)" == "win"
    363364 VBoxVMM_VMMAll/IEMAll.cpp_CXXFLAGS                                 = /FAcs /Fa$(subst /,\\,$(outbase).cod)
     365 VBoxVMM_VMMAll/IEMAllTlb.cpp_CXXFLAGS                              = /FAcs /Fa$(subst /,\\,$(outbase).cod)
    364366 VBoxVMM_VMMAll/target-x86/IEMAllIntprTables1-x86.cpp_CXXFLAGS      = /FAcs /Fa$(subst /,\\,$(outbase).cod)
    365367 VBoxVMM_VMMAll/target-x86/IEMAllIntprTables2-x86.cpp_CXXFLAGS      = /FAcs /Fa$(subst /,\\,$(outbase).cod)
     
    944946        VMMAll/HMVMXAll.cpp \
    945947        VMMAll/IEMAll.cpp \
     948        VMMAll/IEMAllTlb.cpp \
    946949        VMMAll/target-x86/IEMAll-x86.cpp \
    947950        VMMAll/target-x86/IEMAllExec-x86.cpp \
     
    10191022 if "$(USERNAME)" == "bird" && "$(KBUILD_TARGET)" == "win"
    10201023  VMMR0_VMMAll/IEMAll.cpp_CXXFLAGS                      = /FAcs /Fa$(subst /,\\,$(outbase).cod)
     1024  VMMR0_VMMAll/IEMAllTlb.cpp_CXXFLAGS                   = /FAcs /Fa$(subst /,\\,$(outbase).cod)
    10211025  VMMR0_VMMAll/target-x86/IEMAllAImplC-x86.cpp_CXXFLAGS = /FAcs /Fa$(subst /,\\,$(outbase).cod)
    10221026  VMMR0_VMMAll/PGMAll.cpp_CXXFLAGS                      = /FAcs /Fa$(subst /,\\,$(outbase).cod)
     
    11651169  # cl : Command line warning D9025 : overriding '/Oy-' with '/Oy'
    11661170  VMMAll/IEMAll.cpp_CXXFLAGS                                       += -noover -O2xy
     1171  VMMAll/IEMAllTlb.cpp_CXXFLAGS                                    += -noover -O2xy
    11671172  VMMAll/target-x86/IEMAllExec-x86.cpp_CXXFLAGS                    += -noover -O2xy
     1173  VMMAll/target-x86/IEMAllHlpFpu-x86.cpp_CXXFLAGS                  += -noover -O2xy
    11681174  VMMAll/target-x86/IEMAllMem-x86.cpp_CXXFLAGS                     += -noover -O2xy
     1175  VMMAll/target-x86/IEMAllOpHlp-x86.cpp_CXXFLAGS                   += -noover -O2xy
     1176  VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp_CXXFLAGS             += -noover -O2xy
    11691177  VMMAll/target-x86/IEMAllXcpt-x86.cpp_CXXFLAGS                    += -noover -O2xy
    1170   VMMAll/target-x86/IEMAllHlpFpu-x86.cpp_CXXFLAGS                  += -noover -O2xy
    11711178  VMMAll/target-x86/IEMAllAImplC-x86.cpp_CXXFLAGS                  += -noover -O2xy
    11721179  VMMAll/target-x86/IEMAllCImpl-x86.cpp_CXXFLAGS                   += -noover -O2xy
     
    11801187  # Omitting the frame pointer results in larger code, but it might be worth it. (esp addressing vs ebp?)
    11811188  VMMAll/IEMAll.cpp_CXXFLAGS                                       += -O2 -fomit-frame-pointer
     1189  VMMAll/IEMAllTlb.cpp_CXXFLAGS                                    += -O2 -fomit-frame-pointer
     1190  VMMAll/target-x86/IEMAll-x86.cpp_CXXFLAGS                        += -O2 -fomit-frame-pointer
    11821191  VMMAll/target-x86/IEMAllExec-x86.cpp_CXXFLAGS                    += -O2 -fomit-frame-pointer
     1192  VMMAll/target-x86/IEMAllHlpFpu-x86.cpp_CXXFLAGS                  += -O2 -fomit-frame-pointer
    11831193  VMMAll/target-x86/IEMAllMem-x86.cpp_CXXFLAGS                     += -O2 -fomit-frame-pointer
     1194  VMMAll/target-x86/IEMAllOpHlp-x86.cpp_CXXFLAGS                   += -O2 -fomit-frame-pointer
     1195  VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp_CXXFLAGS             += -O2 -fomit-frame-pointer
    11841196  VMMAll/target-x86/IEMAllXcpt-x86.cpp_CXXFLAGS                    += -O2 -fomit-frame-pointer
    1185   VMMAll/target-x86/IEMAllHlpFpu-x86.cpp_CXXFLAGS                  += -O2 -fomit-frame-pointer
    11861197  VMMAll/target-x86/IEMAllCImpl-x86.cpp_CXXFLAGS                   += -O2 -fomit-frame-pointer
    11871198  VMMAll/target-x86/IEMAllCImplSvmInstr-x86.cpp_CXXFLAGS           += -O2 -fomit-frame-pointer
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r108245 r108247  
    397397#else
    398398    return VINF_SUCCESS;
    399 #endif
    400 }
    401 
    402 
    403 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    404 /**
    405  * Worker for iemTlbInvalidateAll.
    406  */
    407 template<bool a_fGlobal>
    408 DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
    409 {
    410     if (!a_fGlobal)
    411         pTlb->cTlsFlushes++;
    412     else
    413         pTlb->cTlsGlobalFlushes++;
    414 
    415     pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
    416     if (RT_LIKELY(pTlb->uTlbRevision != 0))
    417     { /* very likely */ }
    418     else
    419     {
    420         pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
    421         pTlb->cTlbRevisionRollovers++;
    422         unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
    423         while (i-- > 0)
    424             pTlb->aEntries[i * 2].uTag = 0;
    425     }
    426 
    427     pTlb->cTlbNonGlobalLargePageCurLoads    = 0;
    428     pTlb->NonGlobalLargePageRange.uLastTag  = 0;
    429     pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
    430 
    431     if (a_fGlobal)
    432     {
    433         pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
    434         if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
    435         { /* very likely */ }
    436         else
    437         {
    438             pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
    439             pTlb->cTlbRevisionRollovers++;
    440             unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
    441             while (i-- > 0)
    442                 pTlb->aEntries[i * 2 + 1].uTag = 0;
    443         }
    444 
    445         pTlb->cTlbGlobalLargePageCurLoads    = 0;
    446         pTlb->GlobalLargePageRange.uLastTag  = 0;
    447         pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
    448     }
    449 }
    450 #endif
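The revision scheme above is what lets a flush avoid touching every entry: each cached entry carries the revision that was current when it was loaded, so bumping uTlbRevision (or uTlbRevisionGlobal) makes all existing entries miss, and only the rare rollover has to sweep half the array. A minimal lookup sketch under that assumption (hypothetical helper, not part of this changeset; even slots hold non-global entries, odd slots global ones):

    /* Hypothetical illustration only: an entry hits only if its uTag still
       carries the TLB's current revision, so iemTlbInvalidateOne() never has
       to clear individual entries on the fast path. */
    DECL_FORCE_INLINE(bool) iemTlbSketchIsHit(IEMTLB const *pTlb, uint64_t uTagNoRev, uintptr_t idxEven)
    {
        return pTlb->aEntries[idxEven].uTag     == (uTagNoRev | pTlb->uTlbRevision)        /* non-global */
            || pTlb->aEntries[idxEven + 1].uTag == (uTagNoRev | pTlb->uTlbRevisionGlobal); /* global */
    }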
    451 
    452 
    453 /**
    454  * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
    455  */
    456 template<bool a_fGlobal>
    457 DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
    458 {
    459 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    460     Log10(("IEMTlbInvalidateAll\n"));
    461 
    462 # ifdef IEM_WITH_CODE_TLB
    463     pVCpu->iem.s.cbInstrBufTotal = 0;
    464     iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
    465     if (a_fGlobal)
    466         IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
    467     else
    468         IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
    469 # endif
    470 
    471 # ifdef IEM_WITH_DATA_TLB
    472     iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
    473     if (a_fGlobal)
    474         IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
    475     else
    476         IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
    477 # endif
    478 #else
    479     RT_NOREF(pVCpu);
    480 #endif
    481 }
    482 
    483 
    484 /**
    485  * Invalidates the non-global IEM TLB entries.
    486  *
    487  * This is called internally as well as by PGM when moving GC mappings.
    488  *
    489  * @param   pVCpu       The cross context virtual CPU structure of the calling
    490  *                      thread.
    491  */
    492 VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
    493 {
    494     iemTlbInvalidateAll<false>(pVCpu);
    495 }
    496 
    497 
    498 /**
    499  * Invalidates all the IEM TLB entries.
    500  *
    501  * This is called internally as well as by PGM when moving GC mappings.
    502  *
    503  * @param   pVCpu       The cross context virtual CPU structure of the calling
    504  *                      thread.
    505  */
    506 VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
    507 {
    508     iemTlbInvalidateAll<true>(pVCpu);
    509 }
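A hypothetical caller-side sketch of how the two entry points above divide the work (fFlushGlobal is an assumed flag, not taken from this changeset):

    /* Hypothetical usage sketch: flush only non-global entries unless global
       pages have to be dropped as well. */
    if (fFlushGlobal)
        IEMTlbInvalidateAllGlobal(pVCpu);
    else
        IEMTlbInvalidateAll(pVCpu);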
    510 
    511 
    512 /**
    513  * Invalidates a page in the TLBs.
    514  *
    515  * @param   pVCpu       The cross context virtual CPU structure of the calling
    516  *                      thread.
    517  * @param   GCPtr       The address of the page to invalidate
    518  * @thread EMT(pVCpu)
    519  */
    520 VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
    521 {
    522     IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
    523 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    524     Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
    525     GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
    526     Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
    527     uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
    528 
    529 # ifdef IEM_WITH_CODE_TLB
    530     iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
    531 # endif
    532 # ifdef IEM_WITH_DATA_TLB
    533     iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
    534 # endif
    535 #else
    536     NOREF(pVCpu); NOREF(GCPtr);
    537 #endif
    538 }
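The per-page worker is not part of this hunk; below is a sketch of what it plausibly does with the tag and even index computed above (hypothetical code only; the real iemTlbInvalidatePageWorker also handles large pages and TLB tracing):

    /* Hypothetical illustration only: drop the even (non-global) and odd
       (global) slot for the given tag if they match the current revisions. */
    DECL_FORCE_INLINE(void) iemTlbSketchInvalidatePage(IEMTLB *pTlb, uint64_t uTagNoRev, uintptr_t idxEven)
    {
        if (pTlb->aEntries[idxEven].uTag     == (uTagNoRev | pTlb->uTlbRevision))
            pTlb->aEntries[idxEven].uTag     = 0;
        if (pTlb->aEntries[idxEven + 1].uTag == (uTagNoRev | pTlb->uTlbRevisionGlobal))
            pTlb->aEntries[idxEven + 1].uTag = 0;
    }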
    539 
    540 
    541 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    542 /**
    543  * Invalidates both TLBs in a slow fashion following a rollover.
    544  *
    545  * Worker for IEMTlbInvalidateAllPhysical,
    546  * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
    547  * iemMemMapJmp and others.
    548  *
    549  * @thread EMT(pVCpu)
    550  */
    551 void iemTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu) RT_NOEXCEPT
    552 {
    553     Log10(("iemTlbInvalidateAllPhysicalSlow\n"));
    554     ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
    555     ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
    556 
    557     unsigned i;
    558 # ifdef IEM_WITH_CODE_TLB
    559     i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
    560     while (i-- > 0)
    561     {
    562         pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3       = NULL;
    563         pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
    564                                                                | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    565     }
    566     pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
    567     pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    568 # endif
    569 # ifdef IEM_WITH_DATA_TLB
    570     i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
    571     while (i-- > 0)
    572     {
    573         pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3       = NULL;
    574         pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
    575                                                                | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    576     }
    577     pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
    578     pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    579 # endif
    580 
    581 }
    582 #endif
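The physical revision works like the virtual one: the current uTlbPhysRev lives in the IEMTLBE_F_PHYS_REV bits of each entry's fFlagsAndPhysRev, so bumping the revision invalidates every cached host mapping at once, and the sweep above is only needed after a rollover. A hedged sketch of the corresponding validity check (hypothetical helper, not part of this changeset):

    /* Hypothetical illustration only: the host mapping cached in an entry is
       trusted only while its revision bits match the TLB's uTlbPhysRev. */
    DECL_FORCE_INLINE(bool) iemTlbSketchIsPhysMappingValid(IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe)
    {
        return (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev
            && pTlbe->pbMappingR3 != NULL;
    }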
    583 
    584 
    585 /**
    586  * Invalidates the host physical aspects of the IEM TLBs.
    587  *
    588  * This is called internally as well as by PGM when moving GC mappings.
    589  *
    590  * @param   pVCpu       The cross context virtual CPU structure of the calling
    591  *                      thread.
    592  * @note    Currently not used.
    593  */
    594 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
    595 {
    596 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    597     /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
    598     Log10(("IEMTlbInvalidateAllPhysical\n"));
    599 
    600 # ifdef IEM_WITH_CODE_TLB
    601     pVCpu->iem.s.cbInstrBufTotal = 0;
    602 # endif
    603     uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
    604     if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
    605     {
    606         pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
    607         pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    608         pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
    609         pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    610     }
    611     else
    612         iemTlbInvalidateAllPhysicalSlow(pVCpu);
    613 #else
    614     NOREF(pVCpu);
    615 #endif
    616 }
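The `> IEMTLB_PHYS_REV_INCR * 2` test is a wraparound guard: revisions only ever grow in IEMTLB_PHYS_REV_INCR steps, so the incremented value can land at or below twice the increment only once the 64-bit counter wraps, which is exactly when iemTlbInvalidateAllPhysicalSlow() resets everything. A small illustration with assumed values (not from the source):

    /* Illustration only: once the counter wraps, the incremented revision
       falls into the reserved low range and the fast path is rejected. */
    uint64_t uRev = 0;
    uRev -= IEMTLB_PHYS_REV_INCR;               /* i.e. UINT64_MAX - INCR + 1 */
    uRev += IEMTLB_PHYS_REV_INCR;               /* wraps around to 0          */
    Assert(!(uRev > IEMTLB_PHYS_REV_INCR * 2)); /* -> slow sweep is taken     */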
    617 
    618 
    619 /**
    620  * Invalidates the host physical aspects of the IEM TLBs.
    621  *
    622  * This is called internally as well as by PGM when moving GC mappings.
    623  *
    624  * @param   pVM         The cross context VM structure.
    625  * @param   idCpuCaller The ID of the calling EMT if available to the caller,
    626  *                      otherwise NIL_VMCPUID.
    627  * @param   enmReason   The reason we're called.
    628  *
    629  * @remarks Caller holds the PGM lock.
    630  */
    631 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
    632 {
    633 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    634     PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
    635     if (pVCpuCaller)
    636         VMCPU_ASSERT_EMT(pVCpuCaller);
    637     Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
    638 
    639     VMCC_FOR_EACH_VMCPU(pVM)
    640     {
    641 # ifdef IEM_WITH_CODE_TLB
    642         if (pVCpuCaller == pVCpu)
    643             pVCpu->iem.s.cbInstrBufTotal = 0;
    644 # endif
    645 
    646         uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
    647         uint64_t       uTlbPhysRevNew  = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
    648         if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
    649         { /* likely */}
    650         else if (pVCpuCaller != pVCpu)
    651             uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
    652         else
    653         {
    654             iemTlbInvalidateAllPhysicalSlow(pVCpu);
    655             continue;
    656         }
    657         if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
    658             pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    659 
    660         if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
    661             pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    662     }
    663     VMCC_FOR_EACH_VMCPU_END(pVM);
    664 
    665 #else
    666     RT_NOREF(pVM, idCpuCaller, enmReason);
    667399#endif
    668400}
  • trunk/src/VBox/VMM/VMMAll/IEMAllTlb.cpp

    r108245 r108247  
    11/* $Id$ */
    22/** @file
    3  * IEM - Interpreted Execution Manager - All Contexts.
     3 * IEM - Interpreted Execution Manager - TLB Management.
    44 */
    55
     
    2525 * SPDX-License-Identifier: GPL-3.0-only
    2626 */
    27 
    28 
    29 /** @page pg_iem    IEM - Interpreted Execution Manager
    30  *
    31  * The interpreted execution manager (IEM) is for executing short guest code
    32  * sequences that are causing too many exits / virtualization traps.  It will
    33  * also be used to interpret single instructions, thus replacing the selective
    34  * interpreters in EM and IOM.
    35  *
    36  * Design goals:
    37  *      - Relatively small footprint, although we favour speed and correctness
    38  *        over size.
    39  *      - Reasonably fast.
    40  *      - Correctly handle lock prefixed instructions.
    41  *      - Complete instruction set - eventually.
    42  *      - Refactorable into a recompiler, maybe.
    43  *      - Replace EMInterpret*.
    44  *
    45  * Using the existing disassembler has been considered, however this is thought
    46  * to conflict with speed as the disassembler chews things a bit too much while
    47  * leaving us with a somewhat complicated state to interpret afterwards.
    48  *
    49  *
    50  * The current code is very much work in progress. You've been warned!
    51  *
    52  *
    53  * @section sec_iem_fpu_instr   FPU Instructions
    54  *
    55  * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
    56  * same or equivalent instructions on the host FPU.  To make life easy, we also
    57  * let the FPU prioritize the unmasked exceptions for us.  This however, only
    58  * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
    59  * for FPU exception delivery, because with CR0.NE=0 there is a window where we
    60  * can trigger spurious FPU exceptions.
    61  *
    62  * The guest FPU state is not loaded into the host CPU and kept there till we
    63  * leave IEM because the calling conventions have declared an all year open
    64  * season on much of the FPU state.  For instance an innocent looking call to
    65  * memcpy might end up using a whole bunch of XMM or MM registers if the
    66  * particular implementation finds it worthwhile.
    67  *
    68  *
    69  * @section sec_iem_logging     Logging
    70  *
    71  * The IEM code uses the \"IEM\" log group for the main logging. The different
    72  * logging levels/flags are generally used for the following purposes:
    73  *      - Level 1  (Log)  : Errors, exceptions, interrupts and such major events.
    74  *      - Flow  (LogFlow) : Basic enter/exit IEM state info.
    75  *      - Level 2  (Log2) : ?
    76  *      - Level 3  (Log3) : More detailed enter/exit IEM state info.
    77  *      - Level 4  (Log4) : Decoding mnemonics w/ EIP.
    78  *      - Level 5  (Log5) : Decoding details.
    79  *      - Level 6  (Log6) : Enables/disables the lockstep comparison with REM.
    80  *      - Level 7  (Log7) : iret++ execution logging.
    81  *      - Level 8  (Log8) :
    82  *      - Level 9  (Log9) :
    83  *      - Level 10 (Log10): TLBs.
    84  *      - Level 11 (Log11): Unmasked FPU exceptions.
    85  *
    86  * The \"IEM_MEM\" log group covers most of memory related details logging,
    87  * except for errors and exceptions:
    88  *      - Level 1  (Log)  : Reads.
    89  *      - Level 2  (Log2) : Read fallbacks.
    90  *      - Level 3  (Log3) : MemMap read.
    91  *      - Level 4  (Log4) : MemMap read fallbacks.
    92  *      - Level 5  (Log5) : Writes
    93  *      - Level 6  (Log6) : Write fallbacks.
    94  *      - Level 7  (Log7) : MemMap writes and read-writes.
    95  *      - Level 8  (Log8) : MemMap write and read-write fallbacks.
    96  *      - Level 9  (Log9) : Stack reads.
    97  *      - Level 10 (Log10): Stack read fallbacks.
    98  *      - Level 11 (Log11): Stack writes.
    99  *      - Level 12 (Log12): Stack write fallbacks.
    100  *      - Flow  (LogFlow) :
    101  *
    102  * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
    103  *      - Level 1  (Log)  : Errors and other major events.
    104  *      - Flow (LogFlow)  : Misc flow stuff (cleanup?)
    105  *      - Level 2  (Log2) : VM exits.
    106  *
    107  * The syscall logging level assignments:
    108  *      - Level 1: DOS and BIOS.
    109  *      - Level 2: Windows 3.x
    110  *      - Level 3: Linux.
    111  */
    112 
    113 /* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
    114 #ifdef _MSC_VER
    115 # pragma warning(disable:4505)
    116 #endif
    11727
    11828
     
    12737#include <VBox/vmm/iem.h>
    12838#include <VBox/vmm/cpum.h>
    129 #include <VBox/vmm/pdmapic.h>
    130 #include <VBox/vmm/pdm.h>
    13139#include <VBox/vmm/pgm.h>
    132 #include <VBox/vmm/iom.h>
    133 #include <VBox/vmm/em.h>
    134 #include <VBox/vmm/hm.h>
    135 #include <VBox/vmm/nem.h>
    136 #include <VBox/vmm/gcm.h>
    137 #include <VBox/vmm/gim.h>
    138 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    139 # include <VBox/vmm/em.h>
    140 # include <VBox/vmm/hm_svm.h>
    141 #endif
    142 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    143 # include <VBox/vmm/hmvmxinline.h>
    144 #endif
    145 #include <VBox/vmm/tm.h>
    14640#include <VBox/vmm/dbgf.h>
    147 #include <VBox/vmm/dbgftrace.h>
    14841#include "IEMInternal.h"
    14942#include <VBox/vmm/vmcc.h>
    15043#include <VBox/log.h>
    151 #include <VBox/err.h>
    152 #include <VBox/param.h>
    153 #include <VBox/dis.h>
    154 #include <iprt/asm-math.h>
    155 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    156 # include <iprt/asm-amd64-x86.h>
    157 #elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
    158 # include <iprt/asm-arm.h>
    159 #endif
    16044#include <iprt/assert.h>
    16145#include <iprt/string.h>
    16246#include <iprt/x86.h>
    16347
    164 #include "IEMInline.h"
     48//#include "IEMInline.h"
    16549#ifdef VBOX_VMM_TARGET_X86
    16650# include "target-x86/IEMAllTlbInline-x86.h"
    16751#endif
    16852
    169 
    170 /*********************************************************************************************************************************
    171 *   Global Variables                                                                                                             *
    172 *********************************************************************************************************************************/
    173 #if defined(IEM_LOG_MEMORY_WRITES)
    174 /** What IEM just wrote. */
    175 uint8_t g_abIemWrote[256];
    176 /** How much IEM just wrote. */
    177 size_t g_cbIemWrote;
    178 #endif
    179 
    180 
    181 /**
    182  * Initializes the decoder state.
    183  *
    184  * iemReInitDecoder is mostly a copy of this function.
    185  *
    186  * @param   pVCpu               The cross context virtual CPU structure of the
    187  *                              calling thread.
    188  * @param   fExecOpts           Optional execution flags:
    189  *                                  - IEM_F_BYPASS_HANDLERS
    190  *                                  - IEM_F_X86_DISREGARD_LOCK
    191  */
    192 DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
    193 {
    194     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    195     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    196     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    197     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    198     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    199     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    200     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    201     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    202     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    203     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    204 
    205     /* Execution state: */
    206     uint32_t fExec;
    207     pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
    208 
    209     /* Decoder state: */
    210     pVCpu->iem.s.enmDefAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    211     pVCpu->iem.s.enmEffAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;
    212     if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
    213     {
    214         pVCpu->iem.s.enmDefOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    215         pVCpu->iem.s.enmEffOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;
    216     }
    217     else
    218     {
    219         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    220         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    221     }
    222     pVCpu->iem.s.fPrefixes          = 0;
    223     pVCpu->iem.s.uRexReg            = 0;
    224     pVCpu->iem.s.uRexB              = 0;
    225     pVCpu->iem.s.uRexIndex          = 0;
    226     pVCpu->iem.s.idxPrefix          = 0;
    227     pVCpu->iem.s.uVex3rdReg         = 0;
    228     pVCpu->iem.s.uVexLength         = 0;
    229     pVCpu->iem.s.fEvexStuff         = 0;
    230     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
    231 #ifdef IEM_WITH_CODE_TLB
    232     pVCpu->iem.s.pbInstrBuf         = NULL;
    233     pVCpu->iem.s.offInstrNextByte   = 0;
    234     pVCpu->iem.s.offCurInstrStart   = 0;
    235 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    236     pVCpu->iem.s.offOpcode          = 0;
    237 # endif
    238 # ifdef VBOX_STRICT
    239     pVCpu->iem.s.GCPhysInstrBuf     = NIL_RTGCPHYS;
    240     pVCpu->iem.s.cbInstrBuf         = UINT16_MAX;
    241     pVCpu->iem.s.cbInstrBufTotal    = UINT16_MAX;
    242     pVCpu->iem.s.uInstrBufPc        = UINT64_C(0xc0ffc0ffcff0c0ff);
    243 # endif
    244 #else
    245     pVCpu->iem.s.offOpcode          = 0;
    246     pVCpu->iem.s.cbOpcode           = 0;
    247 #endif
    248     pVCpu->iem.s.offModRm           = 0;
    249     pVCpu->iem.s.cActiveMappings    = 0;
    250     pVCpu->iem.s.iNextMapping       = 0;
    251     pVCpu->iem.s.rcPassUp           = VINF_SUCCESS;
    252 
    253 #ifdef DBGFTRACE_ENABLED
    254     switch (IEM_GET_CPU_MODE(pVCpu))
    255     {
    256         case IEMMODE_64BIT:
    257             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
    258             break;
    259         case IEMMODE_32BIT:
    260             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    261             break;
    262         case IEMMODE_16BIT:
    263             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    264             break;
    265     }
    266 #endif
    267 }
    268 
    269 
    270 /**
    271  * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
    272  *
    273  * This is mostly a copy of iemInitDecoder.
    274  *
    275  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    276  */
    277 DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
    278 {
    279     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    280     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    281     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    282     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    283     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    284     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    285     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    286     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    287     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    288 
    289     /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
    290     AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
    291               ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
    292 
    293     IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
    294     pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
    295     pVCpu->iem.s.enmEffAddrMode     = enmMode;
    296     if (enmMode != IEMMODE_64BIT)
    297     {
    298         pVCpu->iem.s.enmDefOpSize   = enmMode;  /** @todo check if this is correct... */
    299         pVCpu->iem.s.enmEffOpSize   = enmMode;
    300     }
    301     else
    302     {
    303         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    304         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    305     }
    306     pVCpu->iem.s.fPrefixes          = 0;
    307     pVCpu->iem.s.uRexReg            = 0;
    308     pVCpu->iem.s.uRexB              = 0;
    309     pVCpu->iem.s.uRexIndex          = 0;
    310     pVCpu->iem.s.idxPrefix          = 0;
    311     pVCpu->iem.s.uVex3rdReg         = 0;
    312     pVCpu->iem.s.uVexLength         = 0;
    313     pVCpu->iem.s.fEvexStuff         = 0;
    314     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
    315 #ifdef IEM_WITH_CODE_TLB
    316     if (pVCpu->iem.s.pbInstrBuf)
    317     {
    318         uint64_t off = (enmMode == IEMMODE_64BIT
    319                         ? pVCpu->cpum.GstCtx.rip
    320                         : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
    321                      - pVCpu->iem.s.uInstrBufPc;
    322         if (off < pVCpu->iem.s.cbInstrBufTotal)
    323         {
    324             pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
    325             pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
    326             if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
    327                 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
    328             else
    329                 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
    330         }
    331         else
    332         {
    333             pVCpu->iem.s.pbInstrBuf       = NULL;
    334             pVCpu->iem.s.offInstrNextByte = 0;
    335             pVCpu->iem.s.offCurInstrStart = 0;
    336             pVCpu->iem.s.cbInstrBuf       = 0;
    337             pVCpu->iem.s.cbInstrBufTotal  = 0;
    338             pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    339         }
    340     }
    341     else
    342     {
    343         pVCpu->iem.s.offInstrNextByte = 0;
    344         pVCpu->iem.s.offCurInstrStart = 0;
    345         pVCpu->iem.s.cbInstrBuf       = 0;
    346         pVCpu->iem.s.cbInstrBufTotal  = 0;
    347 # ifdef VBOX_STRICT
    348         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    349 # endif
    350     }
    351 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    352     pVCpu->iem.s.offOpcode          = 0;
    353 # endif
    354 #else  /* !IEM_WITH_CODE_TLB */
    355     pVCpu->iem.s.cbOpcode           = 0;
    356     pVCpu->iem.s.offOpcode          = 0;
    357 #endif /* !IEM_WITH_CODE_TLB */
    358     pVCpu->iem.s.offModRm           = 0;
    359     Assert(pVCpu->iem.s.cActiveMappings == 0);
    360     pVCpu->iem.s.iNextMapping       = 0;
    361     Assert(pVCpu->iem.s.rcPassUp   == VINF_SUCCESS);
    362     Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
    363 
    364 #ifdef DBGFTRACE_ENABLED
    365     switch (enmMode)
    366     {
    367         case IEMMODE_64BIT:
    368             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
    369             break;
    370         case IEMMODE_32BIT:
    371             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    372             break;
    373         case IEMMODE_16BIT:
    374             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    375             break;
    376     }
    377 #endif
    378 }
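The instruction-buffer window arithmetic in the code-TLB branch above is easiest to see with numbers. A worked example under assumed values (not taken from the source): with the buffer covering 0x1000 bytes from guest address 0x10000 and execution continuing at 0x10234, the offset 0x234 is inside the buffer, so the next-byte/instruction-start offsets become 0x234 and cbInstrBuf is clamped to 0x243 (room for a maximum-length instruction); had RIP been 0x11234, the offset would exceed cbInstrBufTotal and the buffer would be dropped and refetched on the next opcode read.

    /* Worked example with assumed values (hypothetical helper, not in the source). */
    static uint16_t iemSketchInstrBufWindow(void)
    {
        uint64_t const uInstrBufPc     = UINT64_C(0x10000);     /* guest address the buffer starts at */
        uint16_t const cbInstrBufTotal = 0x1000;                /* bytes of opcode data in the buffer */
        uint64_t const uNewRip         = UINT64_C(0x10234);     /* where execution continues          */
        uint64_t const off             = uNewRip - uInstrBufPc; /* 0x234: still inside the buffer     */
        if ((uint16_t)off + 15 <= cbInstrBufTotal)
            return (uint16_t)off + 15;   /* 0x243: room for a maximum-length instruction */
        return cbInstrBufTotal;          /* otherwise clamp to the end of the buffer     */
    }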
    379 
    380 
    381 /**
    382  * Prefetch opcodes the first time, when starting execution.
    383  *
    384  * @returns Strict VBox status code.
    385  * @param   pVCpu               The cross context virtual CPU structure of the
    386  *                              calling thread.
    387  * @param   fExecOpts           Optional execution flags:
    388  *                                  - IEM_F_BYPASS_HANDLERS
    389  *                                  - IEM_F_X86_DISREGARD_LOCK
    390  */
    391 DECLINLINE(VBOXSTRICTRC) iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
    392 {
    393     iemInitDecoder(pVCpu, fExecOpts);
    394 
    395 #ifndef IEM_WITH_CODE_TLB
    396     return iemOpcodeFetchPrefetch(pVCpu);
    397 #else
    398     return VINF_SUCCESS;
    399 #endif
    400 }
    40153
    40254
     
    668320}
    669321
    670 
    671 /** @name   Memory access.
    672  *
    673  * @{
    674  */
    675 
    676 #undef  LOG_GROUP
    677 #define LOG_GROUP LOG_GROUP_IEM_MEM
    678 
    679 #if 0 /*unused*/
    680 /**
    681  * Looks up a memory mapping entry.
    682  *
    683  * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
    684  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    685  * @param   pvMem           The memory address.
    686  * @param   fAccess         The access to.
    687  */
    688 DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
    689 {
    690     Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
    691     fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
    692     if (   pVCpu->iem.s.aMemMappings[0].pv == pvMem
    693         && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    694         return 0;
    695     if (   pVCpu->iem.s.aMemMappings[1].pv == pvMem
    696         && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    697         return 1;
    698     if (   pVCpu->iem.s.aMemMappings[2].pv == pvMem
    699         && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    700         return 2;
    701     return VERR_NOT_FOUND;
    702 }
    703 #endif
    704 
    705 /**
    706  * Finds a free memmap entry when using iNextMapping doesn't work.
    707  *
    708  * @returns Memory mapping index, 1024 on failure.
    709  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    710  */
    711 static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
    712 {
    713     /*
    714      * The easy case.
    715      */
    716     if (pVCpu->iem.s.cActiveMappings == 0)
    717     {
    718         pVCpu->iem.s.iNextMapping = 1;
    719         return 0;
    720     }
    721 
    722     /* There should be enough mappings for all instructions. */
    723     AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
    724 
    725     for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
    726         if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
    727             return i;
    728 
    729     AssertFailedReturn(1024);
    730 }
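A hedged sketch of how a caller would use the helper above, treating the 1024 sentinel as simply out of range (hypothetical caller, not part of this changeset; the status code is purely illustrative):

    /* Hypothetical usage sketch: the failure sentinel 1024 is >= the array
       size, so a plain bounds check doubles as the failure check. */
    unsigned const iMemMap = iemMemMapFindFree(pVCpu);
    AssertReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), VERR_INTERNAL_ERROR_3);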
    731 
    732 
    733 /**
    734  * Commits a bounce buffer that needs writing back and unmaps it.
    735  *
    736  * @returns Strict VBox status code.
    737  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    738  * @param   iMemMap         The index of the buffer to commit.
    739  * @param   fPostponeFail   Whether we can postpone writer failures to ring-3.
    740  *                          Always false in ring-3, obviously.
    741  */
    742 static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
    743 {
    744     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    745     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    746 #ifdef IN_RING3
    747     Assert(!fPostponeFail);
    748     RT_NOREF_PV(fPostponeFail);
    749 #endif
    750 
    751     /*
    752      * Do the writing.
    753      */
    754     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    755     if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
    756     {
    757         uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    758         uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    759         uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    760         if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    761         {
    762             /*
    763              * Carefully and efficiently dealing with access handler return
    764              * codes makes this a little bloated.
    765              */
    766             VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
    767                                                  pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    768                                                  pbBuf,
    769                                                  cbFirst,
    770                                                  PGMACCESSORIGIN_IEM);
    771             if (rcStrict == VINF_SUCCESS)
    772             {
    773                 if (cbSecond)
    774                 {
    775                     rcStrict = PGMPhysWrite(pVM,
    776                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    777                                             pbBuf + cbFirst,
    778                                             cbSecond,
    779                                             PGMACCESSORIGIN_IEM);
    780                     if (rcStrict == VINF_SUCCESS)
    781                     { /* nothing */ }
    782                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    783                     {
    784                         LogEx(LOG_GROUP_IEM,
    785                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
    786                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    787                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    788                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    789                     }
    790 #ifndef IN_RING3
    791                     else if (fPostponeFail)
    792                     {
    793                         LogEx(LOG_GROUP_IEM,
    794                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    795                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    796                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    797                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    798                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    799                         return iemSetPassUpStatus(pVCpu, rcStrict);
    800                     }
    801 #endif
    802                     else
    803                     {
    804                         LogEx(LOG_GROUP_IEM,
    805                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    806                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    807                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    808                         return rcStrict;
    809                     }
    810                 }
    811             }
    812             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    813             {
    814                 if (!cbSecond)
    815                 {
    816                     LogEx(LOG_GROUP_IEM,
    817                           ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
    818                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    819                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    820                 }
    821                 else
    822                 {
    823                     VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
    824                                                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    825                                                           pbBuf + cbFirst,
    826                                                           cbSecond,
    827                                                           PGMACCESSORIGIN_IEM);
    828                     if (rcStrict2 == VINF_SUCCESS)
    829                     {
    830                         LogEx(LOG_GROUP_IEM,
    831                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
    832                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    833                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    834                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    835                     }
    836                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    837                     {
    838                         LogEx(LOG_GROUP_IEM,
    839                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
    840                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    841                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    842                         PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    843                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    844                     }
    845 #ifndef IN_RING3
    846                     else if (fPostponeFail)
    847                     {
    848                         LogEx(LOG_GROUP_IEM,
    849                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    850                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    851                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    852                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    853                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    854                         return iemSetPassUpStatus(pVCpu, rcStrict);
    855                     }
    856 #endif
    857                     else
    858                     {
    859                         LogEx(LOG_GROUP_IEM,
    860                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    861                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    862                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    863                         return rcStrict2;
    864                     }
    865                 }
    866             }
    867 #ifndef IN_RING3
    868             else if (fPostponeFail)
    869             {
    870                 LogEx(LOG_GROUP_IEM,
    871                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    872                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    873                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    874                 if (!cbSecond)
    875                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
    876                 else
    877                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
    878                 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    879                 return iemSetPassUpStatus(pVCpu, rcStrict);
    880             }
    881 #endif
    882             else
    883             {
    884                 LogEx(LOG_GROUP_IEM,
    885                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    886                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    887                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    888                 return rcStrict;
    889             }
    890         }
    891         else
    892         {
    893             /*
    894              * No access handlers, much simpler.
    895              */
    896             int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
    897             if (RT_SUCCESS(rc))
    898             {
    899                 if (cbSecond)
    900                 {
    901                     rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
    902                     if (RT_SUCCESS(rc))
    903                     { /* likely */ }
    904                     else
    905                     {
    906                         LogEx(LOG_GROUP_IEM,
    907                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    908                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    909                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
    910                         return rc;
    911                     }
    912                 }
    913             }
    914             else
    915             {
    916                 LogEx(LOG_GROUP_IEM,
    917                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    918                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
    919                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    920                 return rc;
    921             }
    922         }
    923     }
    924 
    925 #if defined(IEM_LOG_MEMORY_WRITES)
    926     Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    927           RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
    928     if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
    929         Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    930               RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
    931               &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
    932 
    933     size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    934     g_cbIemWrote = cbWrote;
    935     memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
    936 #endif
    937 
    938     /*
    939      * Free the mapping entry.
    940      */
    941     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    942     Assert(pVCpu->iem.s.cActiveMappings != 0);
    943     pVCpu->iem.s.cActiveMappings--;
    944     return VINF_SUCCESS;
    945 }
    946 
    947 
    948 /**
    949  * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
    950  * @todo duplicated
    951  */
    952 DECL_FORCE_INLINE(uint32_t)
    953 iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
    954 {
    955     bool const  fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
    956     if (fAccess & IEM_ACCESS_TYPE_WRITE)
    957         return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    958     return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    959 }
    960 
    961 
    962 /**
    963  * iemMemMap worker that deals with a request crossing pages.
    964  */
    965 VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
    966                                             size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) RT_NOEXCEPT
    967 {
    968     STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
    969     Assert(cbMem <= GUEST_PAGE_SIZE);
    970 
    971     /*
    972      * Do the address translations.
    973      */
    974     uint32_t const cbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    975     RTGCPHYS GCPhysFirst;
    976     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
    977     if (rcStrict != VINF_SUCCESS)
    978         return rcStrict;
    979     Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
    980 
    981     uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
    982     RTGCPHYS GCPhysSecond;
    983     rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
    984                                                  cbSecondPage, fAccess, &GCPhysSecond);
    985     if (rcStrict != VINF_SUCCESS)
    986         return rcStrict;
    987     Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
    988     GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
    989 
    990     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    991 
    992     /*
    993      * Check for data breakpoints.
    994      */
    995     if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
    996     { /* likely */ }
    997     else
    998     {
    999         uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
    1000         fDataBps         |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
    1001                                                       cbSecondPage, fAccess);
    1002         pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
    1003         if (fDataBps > 1)
    1004             LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
    1005                                   fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    1006     }
    1007 
    1008     /*
    1009      * Read in the current memory content if it's a read, execute or partial
    1010      * write access.
    1011      */
    1012     uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    1013 
    1014     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    1015     {
    1016         if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    1017         {
    1018             /*
    1019              * Must carefully deal with access handler status codes here,
    1020              * makes the code a bit bloated.
    1021              */
    1022             rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
    1023             if (rcStrict == VINF_SUCCESS)
    1024             {
    1025                 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    1026                 if (rcStrict == VINF_SUCCESS)
    1027                 { /*likely */ }
    1028                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1029                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1030                 else
    1031                 {
    1032                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
    1033                                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    1034                     return rcStrict;
    1035                 }
    1036             }
    1037             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1038             {
    1039                 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    1040                 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    1041                 {
    1042                     PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    1043                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1044                 }
    1045                 else
    1046                 {
    1047                     LogEx(LOG_GROUP_IEM,
    1048                           ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
    1049                            GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
    1050                     return rcStrict2;
    1051                 }
    1052             }
    1053             else
    1054             {
    1055                 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    1056                                       GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    1057                 return rcStrict;
    1058             }
    1059         }
    1060         else
    1061         {
    1062             /*
    1063              * No informational status codes here, much more straight forward.
    1064              */
    1065             int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
    1066             if (RT_SUCCESS(rc))
    1067             {
    1068                 Assert(rc == VINF_SUCCESS);
    1069                 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
    1070                 if (RT_SUCCESS(rc))
    1071                     Assert(rc == VINF_SUCCESS);
    1072                 else
    1073                 {
    1074                     LogEx(LOG_GROUP_IEM,
    1075                           ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
    1076                     return rc;
    1077                 }
    1078             }
    1079             else
    1080             {
    1081                 LogEx(LOG_GROUP_IEM,
    1082                       ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
    1083                 return rc;
    1084             }
    1085         }
    1086     }
    1087 #ifdef VBOX_STRICT
    1088     else
    1089         memset(pbBuf, 0xcc, cbMem);
    1090     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    1091         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    1092 #endif
    1093     AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
    1094 
    1095     /*
    1096      * Commit the bounce buffer entry.
    1097      */
    1098     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    1099     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = GCPhysSecond;
    1100     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbFirstPage;
    1101     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = (uint16_t)cbSecondPage;
    1102     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = false;
    1103     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    1104     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    1105     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    1106     pVCpu->iem.s.cActiveMappings++;
    1107 
    1108     *ppvMem = pbBuf;
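             /* Unmap info encoding: bits 0-2 = mapping index, bit 3 = always set (valid marker),
                bits 4-7 = the IEM_ACCESS_TYPE_MASK bits; iemMemCommitAndUnmap checks these again at unmap time. */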
    1109     *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    1110     return VINF_SUCCESS;
    1111 }
    1112 
    1113 
    1114 /**
    1115  * iemMemMap worker that deals with iemMemPageMap failures.
    1116  */
    1117 VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
    1118                                        RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) RT_NOEXCEPT
    1119 {
    1120     STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
    1121 
    1122     /*
    1123      * Filter out conditions we can handle and the ones which shouldn't happen.
    1124      */
    1125     if (   rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
    1126         && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
    1127         && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
    1128     {
    1129         AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
    1130         return rcMap;
    1131     }
    1132     pVCpu->iem.s.cPotentialExits++;
    1133 
    1134     /*
    1135      * Read in the current memory content if it's a read, execute or partial
    1136      * write access.
    1137      */
    1138     uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    1139     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    1140     {
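                 /* Unassigned physical memory: nothing to read, so fake it as all ones (0xff) below. */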
    1141         if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
    1142             memset(pbBuf, 0xff, cbMem);
    1143         else
    1144         {
    1145             int rc;
    1146             if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    1147             {
    1148                 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
    1149                 if (rcStrict == VINF_SUCCESS)
    1150                 { /* nothing */ }
    1151                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1152                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1153                 else
    1154                 {
    1155                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    1156                                           GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    1157                     return rcStrict;
    1158                 }
    1159             }
    1160             else
    1161             {
    1162                 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
    1163                 if (RT_SUCCESS(rc))
    1164                 { /* likely */ }
    1165                 else
    1166                 {
    1167                 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
    1168                                           GCPhysFirst, rc));
    1169                     return rc;
    1170                 }
    1171             }
    1172         }
    1173     }
    1174 #ifdef VBOX_STRICT
    1175     else
    1176         memset(pbBuf, 0xcc, cbMem);
    1179     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    1180         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    1181 #endif
    1182 
    1183     /*
    1184      * Commit the bounce buffer entry.
    1185      */
    1186     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    1187     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = NIL_RTGCPHYS;
    1188     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbMem;
    1189     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = 0;
    1190     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
    1191     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    1192     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    1193     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    1194     pVCpu->iem.s.cActiveMappings++;
    1195 
    1196     *ppvMem = pbBuf;
    1197     *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    1198     return VINF_SUCCESS;
    1199 }
    1200 
    1201 
    1202 
    1203 /**
    1204  * Commits the guest memory if bounce buffered and unmaps it.
    1205  *
    1206  * @returns Strict VBox status code.
    1207  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1208  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    1209  */
    1210 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    1211 {
    1212     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    1213     AssertMsgReturn(   (bUnmapInfo & 0x08)
    1214                     && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    1215                     && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
    1216                     ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
    1217                     VERR_NOT_FOUND);
    1218 
    1219     /* If it's bounce buffered, we may need to write back the buffer. */
    1220     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    1221     {
    1222         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    1223             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    1224     }
    1225     /* Otherwise unlock it. */
    1226     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    1227         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1228 
    1229     /* Free the entry. */
    1230     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1231     Assert(pVCpu->iem.s.cActiveMappings != 0);
    1232     pVCpu->iem.s.cActiveMappings--;
    1233     return VINF_SUCCESS;
    1234 }
    1235 
    1236 
    1237 /**
    1238  * Rolls back the guest memory (conceptually only) and unmaps it.
    1239  *
    1240  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1241  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    1242  */
    1243 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    1244 {
    1245     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    1246     AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
    1247                         && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    1248                         &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    1249                            == ((unsigned)bUnmapInfo >> 4),
    1250                         ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    1251 
    1252     /* Unlock it if necessary. */
    1253     if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    1254         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1255 
    1256     /* Free the entry. */
    1257     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1258     Assert(pVCpu->iem.s.cActiveMappings != 0);
    1259     pVCpu->iem.s.cActiveMappings--;
    1260 }
    1261 
    1262 #ifdef IEM_WITH_SETJMP
    1263 
    1264 /**
    1265  * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
    1266  *
    1267  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1268  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    1270  */
    1271 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1272 {
    1273     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    1274     AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
    1275                         && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    1276                         &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    1277                            == ((unsigned)bUnmapInfo >> 4),
    1278                         ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    1279 
    1280     /* If it's bounce buffered, we may need to write back the buffer. */
    1281     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    1282     {
    1283         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    1284         {
    1285             VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    1286             if (rcStrict == VINF_SUCCESS)
    1287                 return;
    1288             IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    1289         }
    1290     }
    1291     /* Otherwise unlock it. */
    1292     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    1293         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1294 
    1295     /* Free the entry. */
    1296     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1297     Assert(pVCpu->iem.s.cActiveMappings != 0);
    1298     pVCpu->iem.s.cActiveMappings--;
    1299 }
    1300 
    1301 
    1302 /** Fallback for iemMemCommitAndUnmapRwJmp.  */
    1303 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1304 {
    1305     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    1306     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1307 }
    1308 
    1309 
    1310 /** Fallback for iemMemCommitAndUnmapAtJmp.  */
    1311 void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1312 {
    1313     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    1314     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1315 }
    1316 
    1317 
    1318 /** Fallback for iemMemCommitAndUnmapWoJmp.  */
    1319 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1320 {
    1321     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    1322     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1323 }
    1324 
    1325 
    1326 /** Fallback for iemMemCommitAndUnmapRoJmp.  */
    1327 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1328 {
    1329     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
    1330     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1331 }
    1332 
    1333 
    1334 /** Fallback for iemMemRollbackAndUnmapWo.  */
    1335 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    1336 {
    1337     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    1338     iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
    1339 }
    1340 
    1341 #endif /* IEM_WITH_SETJMP */
    1342 
    1343 #ifndef IN_RING3
    1344 /**
    1345  * Commits the guest memory if bounce buffered and unmaps it; if any bounce
    1346  * buffer part runs into trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
    1347  *
    1348  * Allows the instruction to be completed and retired, while the IEM user will
    1349  * return to ring-3 immediately afterwards and do the postponed writes there.
    1350  *
    1351  * @returns VBox status code (no strict statuses).  Caller must check
    1352  *          VMCPU_FF_IEM before repeating string instructions and similar stuff.
    1353  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1354  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    1356  */
    1357 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    1358 {
    1359     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    1360     AssertMsgReturn(   (bUnmapInfo & 0x08)
    1361                     && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    1362                     &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    1363                        == ((unsigned)bUnmapInfo >> 4),
    1364                     ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
    1365                     VERR_NOT_FOUND);
    1366 
    1367     /* If it's bounce buffered, we may need to write back the buffer. */
    1368     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    1369     {
    1370         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    1371             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
    1372     }
    1373     /* Otherwise unlock it. */
    1374     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    1375         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1376 
    1377     /* Free the entry. */
    1378     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1379     Assert(pVCpu->iem.s.cActiveMappings != 0);
    1380     pVCpu->iem.s.cActiveMappings--;
    1381     return VINF_SUCCESS;
    1382 }
    1383 #endif
    1384 
    1385 
    1386 /**
    1387  * Rolls back mappings, releasing page locks and such.
    1388  *
    1389  * The caller shall only call this after checking cActiveMappings.
    1390  *
    1391  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    1392  */
    1393 void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
    1394 {
    1395     Assert(pVCpu->iem.s.cActiveMappings > 0);
    1396 
    1397     uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    1398     while (iMemMap-- > 0)
    1399     {
    1400         uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
    1401         if (fAccess != IEM_ACCESS_INVALID)
    1402         {
    1403             AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
    1404             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
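                     /* Only directly mapped pages (neither bounce buffered nor marked not-locked) hold a PGM page mapping lock to release. */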
    1405             if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
    1406                 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1407             AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
    1408                       ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
    1409                        iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
    1410                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
    1411             pVCpu->iem.s.cActiveMappings--;
    1412         }
    1413     }
    1414 }
    1415 
    1416 #undef  LOG_GROUP
    1417 #define LOG_GROUP LOG_GROUP_IEM
    1418 
    1419 /** @} */
    1420 
    1421 
    1422 #ifdef LOG_ENABLED
    1423 /**
    1424  * Logs the current instruction.
    1425  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    1426  * @param   fSameCtx    Set if we have the same context information as the VMM,
    1427  *                      clear if we may have already executed an instruction in
    1428  *                      our debug context. When clear, we assume IEMCPU holds
    1429  *                      valid CPU mode info.
    1432  * @param   pszFunction The IEM function doing the execution.
    1433  */
    1434 static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
    1435 {
    1436 # ifdef IN_RING3
    1437     if (LogIs2Enabled())
    1438     {
    1439         char     szInstr[256];
    1440         uint32_t cbInstr = 0;
    1441         if (fSameCtx)
    1442             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
    1443                                DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
    1444                                szInstr, sizeof(szInstr), &cbInstr);
    1445         else
    1446         {
    1447             uint32_t fFlags = 0;
    1448             switch (IEM_GET_CPU_MODE(pVCpu))
    1449             {
    1450                 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
    1451                 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
    1452                 case IEMMODE_16BIT:
    1453                     if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
    1454                         fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
    1455                     else
    1456                         fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
    1457                     break;
    1458             }
    1459             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
    1460                                szInstr, sizeof(szInstr), &cbInstr);
    1461         }
    1462 
    1463         PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    1464         Log2(("**** %s fExec=%x\n"
    1465               " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
    1466               " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
    1467               " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
    1468               " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
    1469               " %s\n"
    1470               , pszFunction, pVCpu->iem.s.fExec,
    1471               pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
    1472               pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
    1473               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
    1474               pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
    1475               pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
    1476               szInstr));
    1477 
    1478         /* This stuff sucks atm. as it fills the log with MSRs. */
    1479         //if (LogIs3Enabled())
    1480         //    DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
    1481     }
    1482     else
    1483 # endif
    1484         LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
    1485                  pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
    1486     RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
    1487 }
    1488 #endif /* LOG_ENABLED */
    1489 
    1490 
    1491 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1492 /**
    1493  * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
    1494  * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
    1495  *
    1496  * @returns Modified rcStrict.
    1497  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    1498  * @param   rcStrict    The instruction execution status.
    1499  */
    1500 static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
    1501 {
    1502     Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
    1503     if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
    1504     {
    1505         /* VMX preemption timer takes priority over NMI-window exits. */
    1506         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
    1507         {
    1508             rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
    1509             Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
    1510         }
    1511         /*
    1512          * Check remaining intercepts.
    1513          *
    1514          * NMI-window and Interrupt-window VM-exits.
    1515          * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
    1516          * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
    1517          *
    1518          * See Intel spec. 26.7.6 "NMI-Window Exiting".
    1519          * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
    1520          */
    1521         else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
    1522                  && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    1523                  && !TRPMHasTrap(pVCpu))
    1524         {
    1525             Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
    1526             if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
    1527                 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
    1528             {
    1529                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
    1530                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
    1531             }
    1532             else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
    1533                      && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
    1534             {
    1535                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
    1536                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
    1537             }
    1538         }
    1539     }
    1540     /* TPR-below threshold/APIC write has the highest priority. */
    1541     else  if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
    1542     {
    1543         rcStrict = iemVmxApicWriteEmulation(pVCpu);
    1544         Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    1545         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
    1546     }
    1547     /* MTF takes priority over VMX-preemption timer. */
    1548     else
    1549     {
    1550         rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
    1551         Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    1552         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
    1553     }
    1554     return rcStrict;
    1555 }
    1556 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    1557 
    1558 
    1559 /**
    1560  * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,
    1561  * IEMExecOneBypass and friends.
    1562  *
    1563  * Similar code is found in IEMExecLots.
    1564  *
    1565  * @return  Strict VBox status code.
    1566  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    1567  * @param   fExecuteInhibit     If set, execute the instruction following CLI,
    1568  *                      POP SS and MOV SS,GR.
    1569  * @param   pszFunction The calling function name.
    1570  */
    1571 DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
    1572 {
    1573     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    1574     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    1575     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    1576     RT_NOREF_PV(pszFunction);
    1577 
    1578 #ifdef IEM_WITH_SETJMP
    1579     VBOXSTRICTRC rcStrict;
    1580     IEM_TRY_SETJMP(pVCpu, rcStrict)
    1581     {
    1582         uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1583         rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1584     }
    1585     IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    1586     {
    1587         pVCpu->iem.s.cLongJumps++;
    1588     }
    1589     IEM_CATCH_LONGJMP_END(pVCpu);
    1590 #else
    1591     uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1592     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1593 #endif
    1594     if (rcStrict == VINF_SUCCESS)
    1595         pVCpu->iem.s.cInstructions++;
    1596     if (pVCpu->iem.s.cActiveMappings > 0)
    1597     {
    1598         Assert(rcStrict != VINF_SUCCESS);
    1599         iemMemRollback(pVCpu);
    1600     }
    1601     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    1602     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    1603     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    1604 
    1605 //#ifdef DEBUG
    1606 //    AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
    1607 //#endif
    1608 
    1609 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1610     /*
    1611      * Perform any VMX nested-guest instruction boundary actions.
    1612      *
    1613      * If any of these causes a VM-exit, we must skip executing the next
    1614      * instruction (would run into stale page tables). A VM-exit makes sure
    1615      * there is no interrupt-inhibition, so that should ensure we don't go
    1616      * on to try executing the next instruction. Clearing fExecuteInhibit is
    1617      * problematic because of the setjmp/longjmp clobbering above.
    1618      */
    1619     if (   !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    1620                                      | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
    1621         || rcStrict != VINF_SUCCESS)
    1622     { /* likely */ }
    1623     else
    1624         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    1625 #endif
    1626 
    1627     /* Execute the next instruction as well if a cli, pop ss or
    1628        mov ss, Gr has just completed successfully. */
    1629     if (   fExecuteInhibit
    1630         && rcStrict == VINF_SUCCESS
    1631         && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    1632     {
    1633         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
    1634         if (rcStrict == VINF_SUCCESS)
    1635         {
    1636 #ifdef LOG_ENABLED
    1637             iemLogCurInstr(pVCpu, false, pszFunction);
    1638 #endif
    1639 #ifdef IEM_WITH_SETJMP
    1640             IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
    1641             {
    1642                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1643                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1644             }
    1645             IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    1646             {
    1647                 pVCpu->iem.s.cLongJumps++;
    1648             }
    1649             IEM_CATCH_LONGJMP_END(pVCpu);
    1650 #else
    1651             IEM_OPCODE_GET_FIRST_U8(&b);
    1652             rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1653 #endif
    1654             if (rcStrict == VINF_SUCCESS)
    1655             {
    1656                 pVCpu->iem.s.cInstructions++;
    1657 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1658                 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    1659                                               | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
    1660                 { /* likely */ }
    1661                 else
    1662                     rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    1663 #endif
    1664             }
    1665             if (pVCpu->iem.s.cActiveMappings > 0)
    1666             {
    1667                 Assert(rcStrict != VINF_SUCCESS);
    1668                 iemMemRollback(pVCpu);
    1669             }
    1670             AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    1671             AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    1672             AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    1673         }
    1674         else if (pVCpu->iem.s.cActiveMappings > 0)
    1675             iemMemRollback(pVCpu);
    1676         /** @todo drop this after we bake this change into RIP advancing. */
    1677         CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
    1678     }
    1679 
    1680     /*
    1681      * Return value fiddling, statistics and sanity assertions.
    1682      */
    1683     rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1684 
    1685     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    1686     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    1687     return rcStrict;
    1688 }
    1689 
    1690 
    1691 /**
    1692  * Execute one instruction.
    1693  *
    1694  * @return  Strict VBox status code.
    1695  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    1696  */
    1697 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
    1698 {
    1699     AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
    1700 #ifdef LOG_ENABLED
    1701     iemLogCurInstr(pVCpu, true, "IEMExecOne");
    1702 #endif
    1703 
    1704     /*
    1705      * Do the decoding and emulation.
    1706      */
    1707     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    1708     if (rcStrict == VINF_SUCCESS)
    1709         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
    1710     else if (pVCpu->iem.s.cActiveMappings > 0)
    1711         iemMemRollback(pVCpu);
    1712 
    1713     if (rcStrict != VINF_SUCCESS)
    1714         LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    1715                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    1716     return rcStrict;
    1717 }
    1718 
    1719 
    1720 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    1721                                                         const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    1722 {
    1723     VBOXSTRICTRC rcStrict;
    1724     if (   cbOpcodeBytes
    1725         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    1726     {
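                 /* The caller supplied the opcode bytes for the current PC, so seed the instruction
                    buffer / opcode cache directly and skip prefetching from guest memory. */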
    1727         iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
    1728 #ifdef IEM_WITH_CODE_TLB
    1729         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    1730         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    1731         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    1732         pVCpu->iem.s.offCurInstrStart = 0;
    1733         pVCpu->iem.s.offInstrNextByte = 0;
    1734         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    1735 #else
    1736         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    1737         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    1738 #endif
    1739         rcStrict = VINF_SUCCESS;
    1740     }
    1741     else
    1742         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    1743     if (rcStrict == VINF_SUCCESS)
    1744         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
    1745     else if (pVCpu->iem.s.cActiveMappings > 0)
    1746         iemMemRollback(pVCpu);
    1747 
    1748     return rcStrict;
    1749 }
    1750 
    1751 
    1752 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)
    1753 {
    1754     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    1755     if (rcStrict == VINF_SUCCESS)
    1756         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypass");
    1757     else if (pVCpu->iem.s.cActiveMappings > 0)
    1758         iemMemRollback(pVCpu);
    1759 
    1760     return rcStrict;
    1761 }
    1762 
    1763 
    1764 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    1765                                                               const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    1766 {
    1767     VBOXSTRICTRC rcStrict;
    1768     if (   cbOpcodeBytes
    1769         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    1770     {
    1771         iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
    1772 #ifdef IEM_WITH_CODE_TLB
    1773         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    1774         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    1775         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    1776         pVCpu->iem.s.offCurInstrStart = 0;
    1777         pVCpu->iem.s.offInstrNextByte = 0;
    1778         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    1779 #else
    1780         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    1781         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    1782 #endif
    1783         rcStrict = VINF_SUCCESS;
    1784     }
    1785     else
    1786         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    1787     if (rcStrict == VINF_SUCCESS)
    1788         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
    1789     else if (pVCpu->iem.s.cActiveMappings > 0)
    1790         iemMemRollback(pVCpu);
    1791 
    1792     return rcStrict;
    1793 }
    1794 
    1795 
    1796 /**
    1797  * For handling split cacheline lock operations when the host has split-lock
    1798  * detection enabled.
    1799  *
    1800  * This will cause the interpreter to disregard the lock prefix and implicit
    1801  * locking (xchg).
    1802  *
    1803  * @returns Strict VBox status code.
    1804  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    1805  */
    1806 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
    1807 {
    1808     /*
    1809      * Do the decoding and emulation.
    1810      */
    1811     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
    1812     if (rcStrict == VINF_SUCCESS)
    1813         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
    1814     else if (pVCpu->iem.s.cActiveMappings > 0)
    1815         iemMemRollback(pVCpu);
    1816 
    1817     if (rcStrict != VINF_SUCCESS)
    1818         LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    1819                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    1820     return rcStrict;
    1821 }
    1822 
    1823 
    1824 /**
    1825  * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
    1826  * inject a pending TRPM trap.
    1827  */
    1828 VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
    1829 {
    1830     Assert(TRPMHasTrap(pVCpu));
    1831 
    1832     if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    1833         && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    1834     {
    1835         /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
    1836 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    1837         bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
    1838         if (fIntrEnabled)
    1839         {
    1840             if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
    1841                 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    1842             else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    1843                 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
    1844             else
    1845             {
    1846                 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
    1847                 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
    1848             }
    1849         }
    1850 #else
    1851         bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    1852 #endif
    1853         if (fIntrEnabled)
    1854         {
    1855             uint8_t     u8TrapNo;
    1856             TRPMEVENT   enmType;
    1857             uint32_t    uErrCode;
    1858             RTGCPTR     uCr2;
    1859             int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
    1860             AssertRC(rc2);
    1861             Assert(enmType == TRPM_HARDWARE_INT);
    1862             VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
    1863 
    1864             TRPMResetTrap(pVCpu);
    1865 
    1866 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    1867             /* Injecting an event may cause a VM-exit. */
    1868             if (   rcStrict != VINF_SUCCESS
    1869                 && rcStrict != VINF_IEM_RAISED_XCPT)
    1870                 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1871 #else
    1872             NOREF(rcStrict);
    1873 #endif
    1874         }
    1875     }
    1876 
    1877     return VINF_SUCCESS;
    1878 }
    1879 
    1880 
    1881 VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
    1882 {
    1883     uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
    1884     AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
    1885     Assert(cMaxInstructions > 0);
    1886 
    1887     /*
    1888      * See if there is an interrupt pending in TRPM, inject it if we can.
    1889      */
    1890     /** @todo What if we are injecting an exception and not an interrupt? Is that
    1891      *        possible here? For now we assert it is indeed only an interrupt. */
    1892     if (!TRPMHasTrap(pVCpu))
    1893     { /* likely */ }
    1894     else
    1895     {
    1896         VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
    1897         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1898         { /*likely */ }
    1899         else
    1900             return rcStrict;
    1901     }
    1902 
    1903     /*
    1904      * Initial decoder init w/ prefetch, then setup setjmp.
    1905      */
    1906     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    1907     if (rcStrict == VINF_SUCCESS)
    1908     {
    1909 #ifdef IEM_WITH_SETJMP
    1910         pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
    1911         IEM_TRY_SETJMP(pVCpu, rcStrict)
    1912 #endif
    1913         {
    1914             /*
    1915              * The run loop.  We limit ourselves to the caller specified cMaxInstructions.
    1916              */
    1917             uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
    1918             PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    1919             for (;;)
    1920             {
    1921                 /*
    1922                  * Log the state.
    1923                  */
    1924 #ifdef LOG_ENABLED
    1925                 iemLogCurInstr(pVCpu, true, "IEMExecLots");
    1926 #endif
    1927 
    1928                 /*
    1929                  * Do the decoding and emulation.
    1930                  */
    1931                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1932                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1933 #ifdef VBOX_STRICT
    1934                 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
    1935 #endif
    1936                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1937                 {
    1938                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    1939                     pVCpu->iem.s.cInstructions++;
    1940 
    1941 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1942                     /* Perform any VMX nested-guest instruction boundary actions. */
    1943                     uint64_t fCpu = pVCpu->fLocalForcedActions;
    1944                     if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    1945                                   | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
    1946                     { /* likely */ }
    1947                     else
    1948                     {
    1949                         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    1950                         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1951                             fCpu = pVCpu->fLocalForcedActions;
    1952                         else
    1953                         {
    1954                             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1955                             break;
    1956                         }
    1957                     }
    1958 #endif
    1959                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    1960                     {
    1961 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX
    1962                         uint64_t fCpu = pVCpu->fLocalForcedActions;
    1963 #endif
    1964                         fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    1965                                                       | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    1966                                                       | VMCPU_FF_TLB_FLUSH
    1967                                                       | VMCPU_FF_UNHALT );
    1968 
    1969                         if (RT_LIKELY(   (   !fCpu
    1970                                           || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    1971                                               && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
    1972                                       && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
    1973                         {
    1974                             if (--cMaxInstructionsGccStupidity > 0)
    1975                             {
    1976                                 /* Poll timers every now and then according to the caller's specs. */
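                                         /* (cPollRate + 1 is a power of two, so TM is polled once every cPollRate + 1 instructions.) */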
    1977                                 if (   (cMaxInstructionsGccStupidity & cPollRate) != 0
    1978                                     || !TMTimerPollBool(pVM, pVCpu))
    1979                                 {
    1980                                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    1981                                     iemReInitDecoder(pVCpu);
    1982                                     continue;
    1983                                 }
    1984                             }
    1985                         }
    1986                     }
    1987                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    1988                 }
    1989                 else if (pVCpu->iem.s.cActiveMappings > 0)
    1990                     iemMemRollback(pVCpu);
    1991                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1992                 break;
    1993             }
    1994         }
    1995 #ifdef IEM_WITH_SETJMP
    1996         IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    1997         {
    1998             if (pVCpu->iem.s.cActiveMappings > 0)
    1999                 iemMemRollback(pVCpu);
    2000 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    2001             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2002 # endif
    2003             pVCpu->iem.s.cLongJumps++;
    2004         }
    2005         IEM_CATCH_LONGJMP_END(pVCpu);
    2006 #endif
    2007 
    2008         /*
    2009          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    2010          */
    2011         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    2012         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    2013     }
    2014     else
    2015     {
    2016         if (pVCpu->iem.s.cActiveMappings > 0)
    2017             iemMemRollback(pVCpu);
    2018 
    2019 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    2020         /*
    2021          * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
    2022          * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
    2023          */
    2024         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2025 #endif
    2026     }
    2027 
    2028     /*
    2029      * Maybe re-enter raw-mode and log.
    2030      */
    2031     if (rcStrict != VINF_SUCCESS)
    2032         LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    2033                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    2034     if (pcInstructions)
    2035         *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
    2036     return rcStrict;
    2037 }
    2038 
    2039 
    2040 /**
    2041  * Interface used by EMExecuteExec, does exit statistics and limits.
    2042  *
    2043  * @returns Strict VBox status code.
    2044  * @param   pVCpu               The cross context virtual CPU structure.
    2045  * @param   fWillExit           To be defined.
    2046  * @param   cMinInstructions    Minimum number of instructions to execute before checking for FFs.
    2047  * @param   cMaxInstructions    Maximum number of instructions to execute.
    2048  * @param   cMaxInstructionsWithoutExits
    2049  *                              The max number of instructions without exits.
    2050  * @param   pStats              Where to return statistics.
    2051  */
    2052 VMM_INT_DECL(VBOXSTRICTRC)
    2053 IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
    2054                 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
    2055 {
    2056     NOREF(fWillExit); /** @todo define flexible exit crits */
    2057 
    2058     /*
    2059      * Initialize return stats.
    2060      */
    2061     pStats->cInstructions    = 0;
    2062     pStats->cExits           = 0;
    2063     pStats->cMaxExitDistance = 0;
    2064     pStats->cReserved        = 0;
    2065 
    2066     /*
    2067      * Initial decoder init w/ prefetch, then setup setjmp.
    2068      */
    2069     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    2070     if (rcStrict == VINF_SUCCESS)
    2071     {
    2072 #ifdef IEM_WITH_SETJMP
    2073         pVCpu->iem.s.cActiveMappings     = 0; /** @todo wtf?!? */
    2074         IEM_TRY_SETJMP(pVCpu, rcStrict)
    2075 #endif
    2076         {
    2077 #ifdef IN_RING0
    2078             bool const fCheckPreemptionPending   = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    2079 #endif
    2080             uint32_t   cInstructionSinceLastExit = 0;
    2081 
    2082             /*
    2083              * The run loop.  We limit ourselves to the caller specified cMaxInstructions / cMaxInstructionsWithoutExits.
    2084              */
    2085             PVM pVM = pVCpu->CTX_SUFF(pVM);
    2086             for (;;)
    2087             {
    2088                 /*
    2089                  * Log the state.
    2090                  */
    2091 #ifdef LOG_ENABLED
    2092                 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
    2093 #endif
    2094 
    2095                 /*
    2096                  * Do the decoding and emulation.
    2097                  */
    2098                 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
    2099 
    2100                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    2101                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    2102 
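                         /* Code like iemMemBounceBufferMapPhys bumps cPotentialExits for accesses hitting handlers or
                            unassigned memory, so a change here marks an instruction that would have caused an exit. */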
    2103                 if (   cPotentialExits != pVCpu->iem.s.cPotentialExits
    2104                     && cInstructionSinceLastExit > 0 /* don't count the first */ )
    2105                 {
    2106                     pStats->cExits += 1;
    2107                     if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
    2108                         pStats->cMaxExitDistance = cInstructionSinceLastExit;
    2109                     cInstructionSinceLastExit = 0;
    2110                 }
    2111 
    2112                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    2113                 {
    2114                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    2115                     pVCpu->iem.s.cInstructions++;
    2116                     pStats->cInstructions++;
    2117                     cInstructionSinceLastExit++;
    2118 
    2119 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2120                     /* Perform any VMX nested-guest instruction boundary actions. */
    2121                     uint64_t fCpu = pVCpu->fLocalForcedActions;
    2122                     if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    2123                                   | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
    2124                     { /* likely */ }
    2125                     else
    2126                     {
    2127                         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    2128                         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    2129                             fCpu = pVCpu->fLocalForcedActions;
    2130                         else
    2131                         {
    2132                             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2133                             break;
    2134                         }
    2135                     }
    2136 #endif
    2137                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    2138                     {
    2139 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX
    2140                         uint64_t fCpu = pVCpu->fLocalForcedActions;
    2141 #endif
    2142                         fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    2143                                                       | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    2144                                                       | VMCPU_FF_TLB_FLUSH
    2145                                                       | VMCPU_FF_UNHALT );
    2146                         if (RT_LIKELY(   (   (   !fCpu
    2147                                               || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    2148                                                   && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
    2149                                           && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
    2150                                       || pStats->cInstructions < cMinInstructions))
    2151                         {
    2152                             if (pStats->cInstructions < cMaxInstructions)
    2153                             {
    2154                                 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
    2155                                 {
    2156 #ifdef IN_RING0
    2157                                     if (   !fCheckPreemptionPending
    2158                                         || !RTThreadPreemptIsPending(NIL_RTTHREAD))
    2159 #endif
    2160                                     {
    2161                                         Assert(pVCpu->iem.s.cActiveMappings == 0);
    2162                                         iemReInitDecoder(pVCpu);
    2163                                         continue;
    2164                                     }
    2165 #ifdef IN_RING0
    2166                                     rcStrict = VINF_EM_RAW_INTERRUPT;
    2167                                     break;
    2168 #endif
    2169                                 }
    2170                             }
    2171                         }
    2172                         Assert(!(fCpu & VMCPU_FF_IEM));
    2173                     }
    2174                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    2175                 }
    2176                 else if (pVCpu->iem.s.cActiveMappings > 0)
    2177                     iemMemRollback(pVCpu);
    2178                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2179                 break;
    2180             }
    2181         }
    2182 #ifdef IEM_WITH_SETJMP
    2183         IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    2184         {
    2185             if (pVCpu->iem.s.cActiveMappings > 0)
    2186                 iemMemRollback(pVCpu);
    2187             pVCpu->iem.s.cLongJumps++;
    2188         }
    2189         IEM_CATCH_LONGJMP_END(pVCpu);
    2190 #endif
    2191 
    2192         /*
    2193          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    2194          */
    2195         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    2196         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    2197     }
    2198     else
    2199     {
    2200         if (pVCpu->iem.s.cActiveMappings > 0)
    2201             iemMemRollback(pVCpu);
    2202 
    2203 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    2204         /*
    2205          * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
    2206          * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
    2207          */
    2208         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2209 #endif
    2210     }
    2211 
    2212     /*
    2213      * Maybe re-enter raw-mode and log.
    2214      */
    2215     if (rcStrict != VINF_SUCCESS)
    2216         LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
    2217                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
    2218                  pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
    2219     return rcStrict;
    2220 }
    2221 
    2222 
    2223 /**
    2224  * Injects a trap, fault, abort, software interrupt or external interrupt.
    2225  *
    2226  * The parameter list matches TRPMQueryTrapAll pretty closely.
    2227  *
    2228  * @returns Strict VBox status code.
    2229  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    2230  * @param   u8TrapNo            The trap number.
    2231  * @param   enmType             The event type: trap/fault/abort, software
    2232  *                              interrupt or hardware interrupt.
    2233  * @param   uErrCode            The error code if applicable.
    2234  * @param   uCr2                The CR2 value if applicable.
    2235  * @param   cbInstr             The instruction length (only relevant for
    2236  *                              software interrupts).
    2237  * @note    x86 specific, but difficult to move due to iemInitDecoder dep.
    2238  */
    2239 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
    2240                                          uint8_t cbInstr)
    2241 {
    2242     iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
    2243 #ifdef DBGFTRACE_ENABLED
    2244     RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
    2245                       u8TrapNo, enmType, uErrCode, uCr2);
    2246 #endif
    2247 
    2248     uint32_t fFlags;
    2249     switch (enmType)
    2250     {
    2251         case TRPM_HARDWARE_INT:
    2252             Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
    2253             fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
    2254             uErrCode = uCr2 = 0;
    2255             break;
    2256 
    2257         case TRPM_SOFTWARE_INT:
    2258             Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
    2259             fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
    2260             uErrCode = uCr2 = 0;
    2261             break;
    2262 
    2263         case TRPM_TRAP:
    2264         case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
    2265             Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
    2266             fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
    2267             if (u8TrapNo == X86_XCPT_PF)
    2268                 fFlags |= IEM_XCPT_FLAGS_CR2;
    2269             switch (u8TrapNo)
    2270             {
    2271                 case X86_XCPT_DF:
    2272                 case X86_XCPT_TS:
    2273                 case X86_XCPT_NP:
    2274                 case X86_XCPT_SS:
    2275                 case X86_XCPT_PF:
    2276                 case X86_XCPT_AC:
    2277                 case X86_XCPT_GP:
    2278                     fFlags |= IEM_XCPT_FLAGS_ERR;
    2279                     break;
    2280             }
    2281             break;
    2282 
    2283         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    2284     }
    2285 
    2286     VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
    2287 
    2288     if (pVCpu->iem.s.cActiveMappings > 0)
    2289         iemMemRollback(pVCpu);
    2290 
    2291     return rcStrict;
    2292 }
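
For orientation, a minimal hypothetical caller sketch (the wrapper name and the vector source are illustrative assumptions, not part of this changeset; only the IEMInjectTrap prototype above is taken from the source):

/* Sketch: reflect an external (hardware) interrupt into the guest via IEM.
 * Real callers obtain the vector from the PIC/APIC or TRPM; this wrapper is
 * invented purely for illustration. */
static VBOXSTRICTRC exampleInjectExtInt(PVMCPUCC pVCpu, uint8_t uVector)
{
    /* Hardware interrupts carry no error code and no CR2, and cbInstr only
     * matters for software interrupts, so zeros are passed for all three. */
    return IEMInjectTrap(pVCpu, uVector, TRPM_HARDWARE_INT,
                         0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
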
    2293 
    2294 
    2295 /**
    2296  * Injects the active TRPM event.
    2297  *
    2298  * @returns Strict VBox status code.
    2299  * @param   pVCpu               The cross context virtual CPU structure.
    2300  */
    2301 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
    2302 {
    2303 #ifndef IEM_IMPLEMENTS_TASKSWITCH
    2304     IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
    2305 #else
    2306     uint8_t     u8TrapNo;
    2307     TRPMEVENT   enmType;
    2308     uint32_t    uErrCode;
    2309     RTGCUINTPTR uCr2;
    2310     uint8_t     cbInstr;
    2311     int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
    2312     if (RT_FAILURE(rc))
    2313         return rc;
    2314 
    2315     /** @todo r=ramshankar: Pass ICEBP info to IEMInjectTrap() below and handle
    2316      *        ICEBP \#DB injection as a special case. */
    2317     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
    2318 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    2319     if (rcStrict == VINF_SVM_VMEXIT)
    2320         rcStrict = VINF_SUCCESS;
    2321 #endif
    2322 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2323     if (rcStrict == VINF_VMX_VMEXIT)
    2324         rcStrict = VINF_SUCCESS;
    2325 #endif
    2326     /** @todo Are there any other codes that imply the event was successfully
    2327      *        delivered to the guest? See @bugref{6607}.  */
    2328     if (   rcStrict == VINF_SUCCESS
    2329         || rcStrict == VINF_IEM_RAISED_XCPT)
    2330         TRPMResetTrap(pVCpu);
    2331 
    2332     return rcStrict;
    2333 #endif
    2334 }
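
A minimal hypothetical usage sketch (the wrapper is invented for illustration; TRPMHasTrap is assumed to be the usual way to test for a pending event):

/* Sketch: forward a pending TRPM event to IEM before resuming guest
 * execution.  On successful delivery IEMInjectTrpmEvent resets the trap
 * itself, so the caller only needs to propagate the status code. */
static VBOXSTRICTRC exampleDeliverPendingEvent(PVMCPUCC pVCpu)
{
    if (!TRPMHasTrap(pVCpu))
        return VINF_SUCCESS;            /* nothing pending */
    return IEMInjectTrpmEvent(pVCpu);
}
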
    2335 
    2336 
    2337 VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
    2338 {
    2339     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    2340     return VERR_NOT_IMPLEMENTED;
    2341 }
    2342 
    2343 
    2344 VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
    2345 {
    2346     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    2347     return VERR_NOT_IMPLEMENTED;
    2348 }
    2349 
    2350 #ifdef IN_RING3
    2351 
    2352 /**
    2353  * Handles the unlikely and probably fatal merge cases.
    2354  *
    2355  * @returns Merged status code.
    2356  * @param   rcStrict        Current EM status code.
    2357  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    2358  *                          with @a rcStrict.
    2359  * @param   iMemMap         The memory mapping index. For error reporting only.
    2360  * @param   pVCpu           The cross context virtual CPU structure of the calling
    2361  *                          thread, for error reporting only.
    2362  */
    2363 DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
    2364                                                           unsigned iMemMap, PVMCPUCC pVCpu)
    2365 {
    2366     if (RT_FAILURE_NP(rcStrict))
    2367         return rcStrict;
    2368 
    2369     if (RT_FAILURE_NP(rcStrictCommit))
    2370         return rcStrictCommit;
    2371 
    2372     if (rcStrict == rcStrictCommit)
    2373         return rcStrictCommit;
    2374 
    2375     AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
    2376                            VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
    2377                            pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
    2378                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
    2379                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    2380     return VERR_IOM_FF_STATUS_IPE;
    2381 }
    2382 
    2383 
    2384 /**
    2385  * Helper for IOMR3ProcessForceFlag.
    2386  *
    2387  * @returns Merged status code.
    2388  * @param   rcStrict        Current EM status code.
    2389  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    2390  *                          with @a rcStrict.
    2391  * @param   iMemMap         The memory mapping index. For error reporting only.
    2392  * @param   pVCpu           The cross context virtual CPU structure of the calling
    2393  *                          thread, for error reporting only.
    2394  */
    2395 DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
    2396 {
    2397     /* Simple. */
    2398     if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
    2399         return rcStrictCommit;
    2400 
    2401     if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
    2402         return rcStrict;
    2403 
    2404     /* EM scheduling status codes. */
    2405     if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
    2406                   && rcStrict <= VINF_EM_LAST))
    2407     {
    2408         if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
    2409                       && rcStrictCommit <= VINF_EM_LAST))
    2410             return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    2411     }
    2412 
    2413     /* Unlikely */
    2414     return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
    2415 }
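
To make the precedence concrete, an illustrative summary (the reading of the VINF_EM_RAW_TO_R3 case is an interpretation, not stated in this changeset):

/* Merge precedence in short:
 *   - rcStrict is VINF_SUCCESS or VINF_EM_RAW_TO_R3 -> the commit status wins;
 *     if the trip to ring-3 was only needed for the pending commit, a
 *     successful commit collapses the result back to VINF_SUCCESS.
 *   - the commit returned VINF_SUCCESS              -> the current status wins.
 *   - both are EM scheduling codes                  -> the numerically lower
 *     (by VBox convention the more important) code wins.
 *   - anything else                                 -> iemR3MergeStatusSlow().
 */
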
    2416 
    2417 
    2418 /**
    2419  * Called by force-flag handling code when VMCPU_FF_IEM is set.
    2420  *
    2421  * @returns Merge between @a rcStrict and what the commit operation returned.
    2422  * @param   pVM         The cross context VM structure.
    2423  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    2424  * @param   rcStrict    The status code returned by ring-0 or raw-mode.
    2425  */
    2426 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
    2427 {
    2428     /*
    2429      * Reset the pending commit.
    2430      */
    2431     AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
    2432               & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
    2433               ("%#x %#x %#x\n",
    2434                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    2435     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
    2436 
    2437     /*
    2438      * Commit the pending bounce buffers (usually just one).
    2439      */
    2440     unsigned cBufs = 0;
    2441     unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    2442     while (iMemMap-- > 0)
    2443         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
    2444         {
    2445             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    2446             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    2447             Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
    2448 
    2449             uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    2450             uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    2451             uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    2452 
    2453             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
    2454             {
    2455                 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
    2456                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    2457                                                             pbBuf,
    2458                                                             cbFirst,
    2459                                                             PGMACCESSORIGIN_IEM);
    2460                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
    2461                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
    2462                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2463                      VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
    2464             }
    2465 
    2466             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
    2467             {
    2468                 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
    2469                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    2470                                                             pbBuf + cbFirst,
    2471                                                             cbSecond,
    2472                                                             PGMACCESSORIGIN_IEM);
    2473                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
    2474                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
    2475                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
    2476                      VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
    2477             }
    2478             cBufs++;
    2479             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2480         }
    2481 
    2482     AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
    2483               ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
    2484                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    2485     pVCpu->iem.s.cActiveMappings = 0;
    2486     return rcStrict;
    2487 }
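
A hypothetical ring-3 caller sketch (EM's real force-flag handling lives elsewhere and may differ; only IEMR3ProcessForceFlag and the VMCPU_FF_IEM flag are taken from the source above):

/* Sketch: after returning to ring-3, commit any pending IEM bounce-buffer
 * writes before acting on the status code from ring-0/raw-mode. */
static VBOXSTRICTRC exampleHandleR3ForceFlags(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
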
    2488 
    2489 #endif /* IN_RING3 */
    2490 