VirtualBox

Changeset 108244 in vbox


Timestamp: Feb 16, 2025 10:45:02 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 167565
Message: VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531
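
The net effect on the decoder entry point is easiest to see condensed. A minimal sketch of the resulting iemInitDecoderAndPrefetchOpcodes, reconstructed from the IEMAll.cpp hunk below (simplified, not the literal source):

    /* Sketch: the function keeps only the mode-independent setup and, for
       builds without the code TLB, delegates the prefetch work to a helper
       that now lives in VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp. */
    DECLINLINE(VBOXSTRICTRC) iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
    {
        iemInitDecoder(pVCpu, fExecOpts);      /* unchanged, stays in IEMAll.cpp */
    #ifndef IEM_WITH_CODE_TLB
        return iemOpcodeFetchPrefetch(pVCpu);  /* moved non-TLB prefetch path */
    #else
        return VINF_SUCCESS;                   /* code-TLB builds fetch lazily via iemOpcodeFetchBytesJmp */
    #endif
    }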

Location: trunk/src/VBox/VMM
Files: 3 edited, 1 copied

Legend: the left number is the line in r108243, the right number the line in r108244; a line with only a left number was removed, a line with only a right number was added.
  • trunk/src/VBox/VMM/Makefile.kmk

    r108243 r108244  
    196 196         VMMAll/IEMAll.cpp \
    197 197         VMMAll/target-x86/IEMAllExec-x86.cpp \
        198         VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp \
        199         VMMAll/target-x86/IEMAllOpHlp-x86.cpp \
    198 200         VMMAll/target-x86/IEMAllMem-x86.cpp \
    199             VMMAll/target-x86/IEMAllOpHlp-x86.cpp \
    200 201         VMMAll/target-x86/IEMAllXcpt-x86.cpp \
    201 202         VMMAll/target-x86/IEMAllHlpFpu-x86.cpp \
     
    943 944         VMMAll/IEMAll.cpp \
    944 945         VMMAll/target-x86/IEMAllExec-x86.cpp \
        946         VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp \
        947         VMMAll/target-x86/IEMAllOpHlp-x86.cpp \
    945 948         VMMAll/target-x86/IEMAllMem-x86.cpp \
    946             VMMAll/target-x86/IEMAllOpHlp-x86.cpp \
    947 949         VMMAll/target-x86/IEMAllXcpt-x86.cpp \
    948 950         VMMAll/target-x86/IEMAllHlpFpu-x86.cpp \
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r108243 r108244  
    482 482  *                                  - IEM_F_X86_DISREGARD_LOCK
    483 483  */
    484     static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
        484 DECLINLINE(VBOXSTRICTRC) iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
    485 485 {
    486 486     iemInitDecoder(pVCpu, fExecOpts);
    487 487 
    488 488 #ifndef IEM_WITH_CODE_TLB
    489     /*
    490      * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
    491      *
    492      * First translate CS:rIP to a physical address.
    493      *
    494      * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
    495      *       all relevant bytes from the first page, as it ASSUMES it's only ever
    496      *       called for dealing with CS.LIM, page crossing and instructions that
    497      *       are too long.
    498      */
    499     uint32_t    cbToTryRead;
    500     RTGCPTR     GCPtrPC;
    501     if (IEM_IS_64BIT_CODE(pVCpu))
    502     {
    503         cbToTryRead = GUEST_PAGE_SIZE;
    504         GCPtrPC     = pVCpu->cpum.GstCtx.rip;
    505         if (IEM_IS_CANONICAL(GCPtrPC))
    506             cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
    507         else
    508             return iemRaiseGeneralProtectionFault0(pVCpu);
    509     }
    510     else
    511     {
    512         uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
    513         AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    514         if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
    515             cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
    516         else
    517             return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
    518         if (cbToTryRead) { /* likely */ }
    519         else /* overflowed */
    520         {
    521             Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
    522             cbToTryRead = UINT32_MAX;
    523         }
    524         GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
    525         Assert(GCPtrPC <= UINT32_MAX);
    526     }
    527 
    528     PGMPTWALKFAST WalkFast;
    529     int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
    530                                  IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
    531                                  &WalkFast);
    532     if (RT_SUCCESS(rc))
    533         Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
    534     else
    535     {
    536         Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
    537 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    538 /** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
    539  * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
    540         if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
    541             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
    542 # endif
    543         return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
    544     }
    545 #if 0
    546     if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
    547     else
    548     {
    549         Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
    550 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    551 /** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
    552 #  error completely wrong
    553         if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
    554             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    555 # endif
    556         return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    557     }
    558     if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
    559     else
    560     {
    561         Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
    562 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    563 /** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
    564 #  error completely wrong.
    565         if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
    566             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    567 # endif
    568         return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    569     }
     489    return iemOpcodeFetchPrefetch(pVCpu);
    570 490 #else
    571     Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
    572     Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
    573 #endif
    574     RTGCPHYS const GCPhys = WalkFast.GCPhys;
    575 
    576     /*
    577      * Read the bytes at this address.
    578      */
    579     uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
    580     if (cbToTryRead > cbLeftOnPage)
    581         cbToTryRead = cbLeftOnPage;
    582     if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
    583         cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
    584 
    585     if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    586     {
    587         VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
    588         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    589         { /* likely */ }
    590         else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    591         {
    592             Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
    593                  GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
    594             rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    595         }
    596         else
    597         {
    598             Log((RT_SUCCESS(rcStrict)
    599                  ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
    600                  : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
    601                  GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
    602             return rcStrict;
    603         }
    604     }
    605     else
    606     {
    607         rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
    608         if (RT_SUCCESS(rc))
    609         { /* likely */ }
    610         else
    611         {
    612             Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
    613                  GCPtrPC, GCPhys, rc, cbToTryRead));
    614             return rc;
    615         }
    616     }
    617     pVCpu->iem.s.cbOpcode = cbToTryRead;
    618 #endif /* !IEM_WITH_CODE_TLB */
    619 491     return VINF_SUCCESS;
        492 #endif
    620 493 }
    621 494 
     
    887 760 #endif
    888 761 }
    889 
    890 
    891 /**
    892  * Flushes the prefetch buffer, light version.
    893  */
    894 void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
    895 {
    896 #ifndef IEM_WITH_CODE_TLB
    897     pVCpu->iem.s.cbOpcode = cbInstr;
    898 #else
    899     RT_NOREF(pVCpu, cbInstr);
    900 #endif
    901 }
    902 
    903 
    904 /**
    905  * Flushes the prefetch buffer, heavy version.
    906  */
    907 void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
    908 {
    909 #ifndef IEM_WITH_CODE_TLB
    910     pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
    911 #elif 1
    912     pVCpu->iem.s.cbInstrBufTotal = 0;
    913     RT_NOREF(cbInstr);
    914 #else
    915     RT_NOREF(pVCpu, cbInstr);
    916 #endif
    917 }
    918 
    919 
    920 
    921 #ifdef IEM_WITH_CODE_TLB
    922 
    923 /**
    924  * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
    925  * failure and jumps.
    926  *
    927  * We end up here for a number of reasons:
    928  *      - pbInstrBuf isn't yet initialized.
    929  *      - Advancing beyond the buffer boundary (e.g. cross page).
    930  *      - Advancing beyond the CS segment limit.
    931  *      - Fetching from non-mappable page (e.g. MMIO).
    932  *      - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
    933  *
    934  * @param   pVCpu               The cross context virtual CPU structure of the
    935  *                              calling thread.
    936  * @param   pvDst               Where to return the bytes.
    937  * @param   cbDst               Number of bytes to read.  A value of zero is
    938  *                              allowed for initializing pbInstrBuf (the
    939  *                              recompiler does this).  In this case it is best
    940  *                              to set pbInstrBuf to NULL prior to the call.
    941  */
    942 void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
    943 {
    944 # ifdef IN_RING3
    945     for (;;)
    946     {
    947         Assert(cbDst <= 8);
    948         uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
    949 
    950         /*
    951          * We might have a partial buffer match, deal with that first to make the
    952          * rest simpler.  This is the first part of the cross page/buffer case.
    953          */
    954         uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
    955         if (pbInstrBuf != NULL)
    956         {
    957             Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
    958             uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
    959             if (offBuf < cbInstrBuf)
    960             {
    961                 Assert(offBuf + cbDst > cbInstrBuf);
    962                 uint32_t const cbCopy = cbInstrBuf - offBuf;
    963                 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
    964 
    965                 cbDst  -= cbCopy;
    966                 pvDst   = (uint8_t *)pvDst + cbCopy;
    967                 offBuf += cbCopy;
    968             }
    969         }
    970 
    971         /*
    972          * Check segment limit, figuring how much we're allowed to access at this point.
    973          *
    974          * We will fault immediately if RIP is past the segment limit / in non-canonical
    975          * territory.  If we do continue, there are one or more bytes to read before we
    976          * end up in trouble and we need to do that first before faulting.
    977          */
    978         RTGCPTR  GCPtrFirst;
    979         uint32_t cbMaxRead;
    980         if (IEM_IS_64BIT_CODE(pVCpu))
    981         {
    982             GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
    983             if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
    984             { /* likely */ }
    985             else
    986                 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
    987             cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
    988         }
    989         else
    990         {
    991             GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
    992             /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
    993             if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
    994             { /* likely */ }
    995             else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
    996                 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
    997             cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
    998             if (cbMaxRead != 0)
    999             { /* likely */ }
    1000             else
    1001             {
    1002                 /* Overflowed because address is 0 and limit is max. */
    1003                 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
    1004                 cbMaxRead = X86_PAGE_SIZE;
    1005             }
    1006             GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
    1007             uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
    1008             if (cbMaxRead2 < cbMaxRead)
    1009                 cbMaxRead = cbMaxRead2;
    1010             /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
    1011         }
    1012 
    1013         /*
    1014          * Get the TLB entry for this piece of code.
    1015          */
    1016         uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
    1017         PIEMTLBENTRY   pTlbe     = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
    1018         if (   pTlbe->uTag               == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
    1019             || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
    1020         {
    1021             /* likely when executing lots of code, otherwise unlikely */
    1022 #  ifdef IEM_WITH_TLB_STATISTICS
    1023             pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
    1024 #  endif
    1025             Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
    1026 
    1027             /* Check TLB page table level access flags. */
    1028             if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
    1029             {
    1030                 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
    1031                 {
    1032                     Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
    1033                     iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    1034                 }
    1035                 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
    1036                 {
    1037                     Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
    1038                     iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    1039                 }
    1040             }
    1041 
    1042             /* Look up the physical page info if necessary. */
    1043             if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
    1044             { /* not necessary */ }
    1045             else
    1046             {
    1047                 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
    1048                 { /* likely */ }
    1049                 else
    1050                     iemTlbInvalidateAllPhysicalSlow(pVCpu);
    1051                 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
    1052                 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
    1053                                                     &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
    1054                 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
    1055             }
    1056         }
    1057         else
    1058         {
    1059             pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
    1060 
    1061             /* This page table walking will set A bits as required by the access while performing the walk.
    1062                ASSUMES these are set when the address is translated rather than on commit... */
    1063             /** @todo testcase: check when A bits are actually set by the CPU for code.  */
    1064             PGMPTWALKFAST WalkFast;
    1065             int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
    1066                                          IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
    1067                                          &WalkFast);
    1068             if (RT_SUCCESS(rc))
    1069                 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
    1070             else
    1071             {
    1072 #  ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    1073                 /** @todo Nested VMX: Need to handle EPT violation/misconfig here?  OF COURSE! */
    1074                 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
    1075 #  endif
    1076                 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
    1077                 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
    1078             }
    1079 
    1080             AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
    1081             if (   !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
    1082                 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
    1083             {
    1084                 pTlbe--;
    1085                 pTlbe->uTag         = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
    1086                 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
    1087                     iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
    1088 #  ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    1089                 else
    1090                     ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
    1091 #  endif
    1092             }
    1093             else
    1094             {
    1095                 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
    1096                 pTlbe->uTag         = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
    1097                 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
    1098                     iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
    1099 #  ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    1100                 else
    1101                     ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
    1102 #  endif
    1103             }
    1104             pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
    1105                                     | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
    1106                                     | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
    1107             RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    1108             pTlbe->GCPhys           = GCPhysPg;
    1109             pTlbe->pbMappingR3      = NULL;
    1110             Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
    1111             Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
    1112             Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
    1113 
    1114             if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
    1115                 IEMTLBTRACE_LOAD(       pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
    1116             else
    1117                 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
    1118 
    1119             /* Resolve the physical address. */
    1120             if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
    1121             { /* likely */ }
    1122             else
    1123                 iemTlbInvalidateAllPhysicalSlow(pVCpu);
    1124             Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
    1125             rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
    1126                                             &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
    1127             AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
    1128         }
    1129 
    1130 # if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
    1131         /*
    1132          * Try do a direct read using the pbMappingR3 pointer.
    1133          * Note! Do not recheck the physical TLB revision number here as we have the
    1134          *       wrong response to changes in the else case.  If someone is updating
    1135          *       pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
    1136          *       pretending we always won the race.
    1137          */
    1138         if (    (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
    1139              == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
    1140         {
    1141             uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
    1142             pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
    1143             if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
    1144             {
    1145                 pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(15, cbMaxRead);
    1146                 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
    1147             }
    1148             else
    1149             {
    1150                 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
    1151                 if (cbInstr + (uint32_t)cbDst <= 15)
    1152                 {
    1153                     pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
    1154                     pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
    1155                 }
    1156                 else
    1157                 {
    1158                     Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
    1159                          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
    1160                     iemRaiseGeneralProtectionFault0Jmp(pVCpu);
    1161                 }
    1162             }
    1163             if (cbDst <= cbMaxRead)
    1164             {
    1165                 pVCpu->iem.s.fTbCrossedPage     |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
    1166 #  if 0 /* unused */
    1167                 pVCpu->iem.s.GCPhysInstrBufPrev  = pVCpu->iem.s.GCPhysInstrBuf;
    1168 #  endif
    1169                 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
    1170                 pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
    1171                 pVCpu->iem.s.GCPhysInstrBuf   = pTlbe->GCPhys;
    1172                 pVCpu->iem.s.pbInstrBuf       = pTlbe->pbMappingR3;
    1173                 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
    1174                     memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
    1175                 else
    1176                     Assert(!pvDst);
    1177                 return;
    1178             }
    1179             pVCpu->iem.s.pbInstrBuf = NULL;
    1180 
    1181             memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
    1182             pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
    1183         }
    1184 # else
    1185 #  error "refactor as needed"
    1186         /*
    1187          * If there is no special read handling, we can read a bit more and
    1188          * put it in the prefetch buffer.
    1189          */
    1190         if (   cbDst < cbMaxRead
    1191             && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
    1192         {
    1193             VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
    1194                                                 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
    1195             if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1196             { /* likely */ }
    1197             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1198             {
    1199                 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
    1200                      GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
    1201                 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1202                 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICRC_VAL(rcStrict)));
    1203             }
    1204             else
    1205             {
    1206                 Log((RT_SUCCESS(rcStrict)
    1207                      ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
    1208                      : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
    1209                      GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
    1210                 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    1211             }
    1212         }
    1213 # endif
    1214         /*
    1215          * Special read handling, so only read exactly what's needed.
    1216          * This is a highly unlikely scenario.
    1217          */
    1218         else
    1219         {
    1220             pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
    1221 
    1222             /* Check instruction length. */
    1223             uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
    1224             if (RT_LIKELY(cbInstr + cbDst <= 15))
    1225             { /* likely */ }
    1226             else
    1227             {
    1228                 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
    1229                      pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
    1230                 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
    1231             }
    1232 
    1233             /* Do the reading. */
    1234             uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
    1235             if (cbToRead > 0)
    1236             {
    1237                 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
    1238                                                     pvDst, cbToRead, PGMACCESSORIGIN_IEM);
    1239                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1240                 { /* likely */ }
    1241                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1242                 {
    1243                     Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
    1244                          GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
    1245                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1246                     AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
    1247                 }
    1248                 else
    1249                 {
    1250                     Log((RT_SUCCESS(rcStrict)
    1251                          ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
    1252                          : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
    1253                          GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
    1254                     IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    1255                 }
    1256             }
    1257 
    1258             /* Update the state and probably return. */
    1259             uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
    1260             pVCpu->iem.s.fTbCrossedPage     |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
    1261 #  if 0 /* unused */
    1262             pVCpu->iem.s.GCPhysInstrBufPrev  = pVCpu->iem.s.GCPhysInstrBuf;
    1263 #  endif
    1264             pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
    1265             pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
    1266             pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
    1267             pVCpu->iem.s.cbInstrBufTotal  = X86_PAGE_SIZE; /** @todo ??? */
    1268             pVCpu->iem.s.GCPhysInstrBuf   = pTlbe->GCPhys;
    1269             pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
    1270             pVCpu->iem.s.pbInstrBuf       = NULL;
    1271             if (cbToRead == cbDst)
    1272                 return;
    1273             Assert(cbToRead == cbMaxRead);
    1274         }
    1275 
    1276         /*
    1277          * More to read, loop.
    1278          */
    1279         cbDst -= cbMaxRead;
    1280         pvDst  = (uint8_t *)pvDst + cbMaxRead;
    1281     }
    1282 # else  /* !IN_RING3 */
    1283     RT_NOREF(pvDst, cbDst);
    1284     if (pvDst || cbDst)
    1285         IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
    1286 # endif /* !IN_RING3 */
    1287 }
    1288 
    1289 #else /* !IEM_WITH_CODE_TLB */
    1290 
    1291 /**
    1292  * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
    1293  * exception if it fails.
    1294  *
    1295  * @returns Strict VBox status code.
    1296  * @param   pVCpu               The cross context virtual CPU structure of the
    1297  *                              calling thread.
    1298  * @param   cbMin               The minimum number of bytes relative to offOpcode
    1299  *                              that must be read.
    1300  */
    1301 VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
    1302 {
    1303     /*
    1304      * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
    1305      *
    1306      * First translate CS:rIP to a physical address.
    1307      */
    1308     uint8_t const   cbOpcode  = pVCpu->iem.s.cbOpcode;
    1309     uint8_t const   offOpcode = pVCpu->iem.s.offOpcode;
    1310     uint8_t const   cbLeft    = cbOpcode - offOpcode;
    1311     Assert(cbLeft < cbMin);
    1312     Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
    1313 
    1314     uint32_t        cbToTryRead;
    1315     RTGCPTR         GCPtrNext;
    1316     if (IEM_IS_64BIT_CODE(pVCpu))
    1317     {
    1318         GCPtrNext   = pVCpu->cpum.GstCtx.rip + cbOpcode;
    1319         if (!IEM_IS_CANONICAL(GCPtrNext))
    1320             return iemRaiseGeneralProtectionFault0(pVCpu);
    1321         cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
    1322     }
    1323     else
    1324     {
    1325         uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
    1326         /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
    1327         GCPtrNext32 += cbOpcode;
    1328         if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
    1329             /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
    1330             return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
    1331         cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
    1332         if (!cbToTryRead) /* overflowed */
    1333         {
    1334             Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
    1335             cbToTryRead = UINT32_MAX;
    1336             /** @todo check out wrapping around the code segment.  */
    1337         }
    1338         if (cbToTryRead < cbMin - cbLeft)
    1339             return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
    1340         GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
    1341 
    1342         uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
    1343         if (cbToTryRead > cbLeftOnPage)
    1344             cbToTryRead = cbLeftOnPage;
    1345     }
    1346 
    1347     /* Restrict to opcode buffer space.
    1348 
    1349        We're making ASSUMPTIONS here based on work done previously in
    1350        iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
    1351        be fetched in case of an instruction crossing two pages. */
    1352     if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
    1353         cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
    1354     if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
    1355     { /* likely */ }
    1356     else
    1357     {
    1358         Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
    1359              pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
    1360         return iemRaiseGeneralProtectionFault0(pVCpu);
    1361     }
    1362 
    1363     PGMPTWALKFAST WalkFast;
    1364     int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
    1365                                  IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
    1366                                  &WalkFast);
    1367     if (RT_SUCCESS(rc))
    1368         Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
    1369     else
    1370     {
    1371         Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
    1372 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    1373         if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
    1374             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
    1375 #endif
    1376         return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
    1377     }
    1378     Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
    1379     Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
    1380 
    1381     RTGCPHYS const GCPhys = WalkFast.GCPhys;
    1382     Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n",  GCPtrNext,  GCPhys, cbOpcode));
    1383 
    1384     /*
    1385      * Read the bytes at this address.
    1386      *
    1387      * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
    1388      * and since PATM should only patch the start of an instruction there
    1389      * should be no need to check again here.
    1390      */
    1391     if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    1392     {
    1393         VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
    1394                                             cbToTryRead, PGMACCESSORIGIN_IEM);
    1395         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1396         { /* likely */ }
    1397         else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1398         {
    1399             Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
    1400                  GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
    1401             rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1402         }
    1403         else
    1404         {
    1405             Log((RT_SUCCESS(rcStrict)
    1406                  ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
    1407                  : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
    1408                  GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
    1409             return rcStrict;
    1410         }
    1411     }
    1412     else
    1413     {
    1414         rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
    1415         if (RT_SUCCESS(rc))
    1416         { /* likely */ }
    1417         else
    1418         {
    1419             Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
    1420             return rc;
    1421         }
    1422     }
    1423     pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
    1424     Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
    1425 
    1426     return VINF_SUCCESS;
    1427 }
    1428 
    1429 #endif /* !IEM_WITH_CODE_TLB */
    1430 #ifndef IEM_WITH_SETJMP
    1431 
    1432 /**
    1433  * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
    1434  *
    1435  * @returns Strict VBox status code.
    1436  * @param   pVCpu               The cross context virtual CPU structure of the
    1437  *                              calling thread.
    1438  * @param   pb                  Where to return the opcode byte.
    1439  */
    1440 VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
    1441 {
    1442     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
    1443     if (rcStrict == VINF_SUCCESS)
    1444     {
    1445         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1446         *pb = pVCpu->iem.s.abOpcode[offOpcode];
    1447         pVCpu->iem.s.offOpcode = offOpcode + 1;
    1448     }
    1449     else
    1450         *pb = 0;
    1451     return rcStrict;
    1452 }
    1453 
    1454 #else  /* IEM_WITH_SETJMP */
    1455 
    1456 /**
    1457  * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
    1458  *
    1459  * @returns The opcode byte.
    1460  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1461  */
    1462 uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    1463 {
    1464 # ifdef IEM_WITH_CODE_TLB
    1465     uint8_t u8;
    1466     iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
    1467     return u8;
    1468 # else
    1469     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
    1470     if (rcStrict == VINF_SUCCESS)
    1471         return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
    1472     IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    1473 # endif
    1474 }
    1475 
    1476 #endif /* IEM_WITH_SETJMP */
    1477 
    1478 #ifndef IEM_WITH_SETJMP
    1479 
    1480 /**
    1481  * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
    1482  *
    1483  * @returns Strict VBox status code.
    1484  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1485  * @param   pu16                Where to return the opcode dword.
    1486  */
    1487 VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
    1488 {
    1489     uint8_t      u8;
    1490     VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    1491     if (rcStrict == VINF_SUCCESS)
    1492         *pu16 = (int8_t)u8;
    1493     return rcStrict;
    1494 }
    1495 
    1496 
    1497 /**
    1498  * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
    1499  *
    1500  * @returns Strict VBox status code.
    1501  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1502  * @param   pu32                Where to return the opcode dword.
    1503  */
    1504 VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
    1505 {
    1506     uint8_t      u8;
    1507     VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    1508     if (rcStrict == VINF_SUCCESS)
    1509         *pu32 = (int8_t)u8;
    1510     return rcStrict;
    1511 }
    1512 
    1513 
    1514 /**
    1515  * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
    1516  *
    1517  * @returns Strict VBox status code.
    1518  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1519  * @param   pu64                Where to return the opcode qword.
    1520  */
    1521 VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    1522 {
    1523     uint8_t      u8;
    1524     VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    1525     if (rcStrict == VINF_SUCCESS)
    1526         *pu64 = (int8_t)u8;
    1527     return rcStrict;
    1528 }
    1529 
    1530 #endif /* !IEM_WITH_SETJMP */
    1531 
    1532 
    1533 #ifndef IEM_WITH_SETJMP
    1534 
    1535 /**
    1536  * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
    1537  *
    1538  * @returns Strict VBox status code.
    1539  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1540  * @param   pu16                Where to return the opcode word.
    1541  */
    1542 VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
    1543 {
    1544     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    1545     if (rcStrict == VINF_SUCCESS)
    1546     {
    1547         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1548 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    1549         *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    1550 # else
    1551         *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    1552 # endif
    1553         pVCpu->iem.s.offOpcode = offOpcode + 2;
    1554     }
    1555     else
    1556         *pu16 = 0;
    1557     return rcStrict;
    1558 }
    1559 
    1560 #else  /* IEM_WITH_SETJMP */
    1561 
    1562 /**
    1563  * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
    1564  *
    1565  * @returns The opcode word.
    1566  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1567  */
    1568 uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    1569 {
    1570 # ifdef IEM_WITH_CODE_TLB
    1571     uint16_t u16;
    1572     iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
    1573     return u16;
    1574 # else
    1575     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    1576     if (rcStrict == VINF_SUCCESS)
    1577     {
    1578         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1579         pVCpu->iem.s.offOpcode += 2;
    1580 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    1581         return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    1582 #  else
    1583         return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    1584 #  endif
    1585     }
    1586     IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    1587 # endif
    1588 }
    1589 
    1590 #endif /* IEM_WITH_SETJMP */
    1591 
    1592 #ifndef IEM_WITH_SETJMP
    1593 
    1594 /**
    1595  * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
    1596  *
    1597  * @returns Strict VBox status code.
    1598  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1599  * @param   pu32                Where to return the opcode double word.
    1600  */
    1601 VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
    1602 {
    1603     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    1604     if (rcStrict == VINF_SUCCESS)
    1605     {
    1606         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1607         *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    1608         pVCpu->iem.s.offOpcode = offOpcode + 2;
    1609     }
    1610     else
    1611         *pu32 = 0;
    1612     return rcStrict;
    1613 }
    1614 
    1615 
    1616 /**
    1617  * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
    1618  *
    1619  * @returns Strict VBox status code.
    1620  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1621  * @param   pu64                Where to return the opcode quad word.
    1622  */
    1623 VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    1624 {
    1625     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    1626     if (rcStrict == VINF_SUCCESS)
    1627     {
    1628         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1629         *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    1630         pVCpu->iem.s.offOpcode = offOpcode + 2;
    1631     }
    1632     else
    1633         *pu64 = 0;
    1634     return rcStrict;
    1635 }
    1636 
    1637 #endif /* !IEM_WITH_SETJMP */
    1638 
    1639 #ifndef IEM_WITH_SETJMP
    1640 
    1641 /**
    1642  * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
    1643  *
    1644  * @returns Strict VBox status code.
    1645  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1646  * @param   pu32                Where to return the opcode dword.
    1647  */
    1648 VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
    1649 {
    1650     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    1651     if (rcStrict == VINF_SUCCESS)
    1652     {
    1653         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1654 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    1655         *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    1656 # else
    1657         *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    1658                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    1659                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    1660                                     pVCpu->iem.s.abOpcode[offOpcode + 3]);
    1661 # endif
    1662         pVCpu->iem.s.offOpcode = offOpcode + 4;
    1663     }
    1664     else
    1665         *pu32 = 0;
    1666     return rcStrict;
    1667 }
    1668 
    1669 #else  /* IEM_WITH_SETJMP */
    1670 
    1671 /**
    1672  * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
    1673  *
    1674  * @returns The opcode dword.
    1675  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1676  */
    1677 uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    1678 {
    1679 # ifdef IEM_WITH_CODE_TLB
    1680     uint32_t u32;
    1681     iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
    1682     return u32;
    1683 # else
    1684     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    1685     if (rcStrict == VINF_SUCCESS)
    1686     {
    1687         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1688         pVCpu->iem.s.offOpcode = offOpcode + 4;
    1689 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    1690         return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    1691 #  else
    1692         return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    1693                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
    1694                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
    1695                                    pVCpu->iem.s.abOpcode[offOpcode + 3]);
    1696 #  endif
    1697     }
    1698     IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    1699 # endif
    1700 }
    1701 
    1702 #endif /* IEM_WITH_SETJMP */
    1703 
    1704 #ifndef IEM_WITH_SETJMP
    1705 
    1706 /**
    1707  * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
    1708  *
    1709  * @returns Strict VBox status code.
    1710  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1711  * @param   pu64                Where to return the opcode dword.
    1712  */
    1713 VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    1714 {
    1715     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    1716     if (rcStrict == VINF_SUCCESS)
    1717     {
    1718         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1719         *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    1720                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    1721                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    1722                                     pVCpu->iem.s.abOpcode[offOpcode + 3]);
    1723         pVCpu->iem.s.offOpcode = offOpcode + 4;
    1724     }
    1725     else
    1726         *pu64 = 0;
    1727     return rcStrict;
    1728 }
    1729 
    1730 
    1731 /**
    1732  * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
    1733  *
    1734  * @returns Strict VBox status code.
    1735  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1736  * @param   pu64                Where to return the opcode qword.
    1737  */
    1738 VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    1739 {
    1740     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    1741     if (rcStrict == VINF_SUCCESS)
    1742     {
    1743         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1744         *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    1745                                              pVCpu->iem.s.abOpcode[offOpcode + 1],
    1746                                              pVCpu->iem.s.abOpcode[offOpcode + 2],
    1747                                              pVCpu->iem.s.abOpcode[offOpcode + 3]);
    1748         pVCpu->iem.s.offOpcode = offOpcode + 4;
    1749     }
    1750     else
    1751         *pu64 = 0;
    1752     return rcStrict;
    1753 }
    1754 
    1755 #endif /* !IEM_WITH_SETJMP */
    1756 
    1757 #ifndef IEM_WITH_SETJMP
    1758 
    1759 /**
    1760  * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
    1761  *
    1762  * @returns Strict VBox status code.
    1763  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1764  * @param   pu64                Where to return the opcode qword.
    1765  */
    1766 VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    1767 {
    1768     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
    1769     if (rcStrict == VINF_SUCCESS)
    1770     {
    1771         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1772 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    1773         *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    1774 # else
    1775         *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    1776                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    1777                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    1778                                     pVCpu->iem.s.abOpcode[offOpcode + 3],
    1779                                     pVCpu->iem.s.abOpcode[offOpcode + 4],
    1780                                     pVCpu->iem.s.abOpcode[offOpcode + 5],
    1781                                     pVCpu->iem.s.abOpcode[offOpcode + 6],
    1782                                     pVCpu->iem.s.abOpcode[offOpcode + 7]);
    1783 # endif
    1784         pVCpu->iem.s.offOpcode = offOpcode + 8;
    1785     }
    1786     else
    1787         *pu64 = 0;
    1788     return rcStrict;
    1789 }
    1790 
    1791 #else  /* IEM_WITH_SETJMP */
    1792 
    1793 /**
    1794  * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
    1795  *
    1796  * @returns The opcode qword.
    1797  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1798  */
    1799 uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    1800 {
    1801 # ifdef IEM_WITH_CODE_TLB
    1802     uint64_t u64;
    1803     iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
    1804     return u64;
    1805 # else
    1806     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
    1807     if (rcStrict == VINF_SUCCESS)
    1808     {
    1809         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1810         pVCpu->iem.s.offOpcode = offOpcode + 8;
    1811 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    1812         return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    1813 #  else
    1814         return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    1815                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
    1816                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
    1817                                    pVCpu->iem.s.abOpcode[offOpcode + 3],
    1818                                    pVCpu->iem.s.abOpcode[offOpcode + 4],
    1819                                    pVCpu->iem.s.abOpcode[offOpcode + 5],
    1820                                    pVCpu->iem.s.abOpcode[offOpcode + 6],
    1821                                    pVCpu->iem.s.abOpcode[offOpcode + 7]);
    1822 #  endif
    1823     }
    1824     IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    1825 # endif
    1826 }
    1827 
    1828 #endif /* IEM_WITH_SETJMP */
    1829 
    1830 762
    1831 763
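
The bulk of the deletion above is opcode-fetch code that this changeset relocates into the new target-x86 sources (the copied IEMAllOpcodeFetch-x86.cpp below): the non-TLB prefetch and fetch-more paths plus the Slow/SlowJmp opcode getters. For orientation, a hypothetical caller-side sketch of how a Slow getter is reached, built only from fields visible in the hunk above (abOpcode, cbOpcode, offOpcode); the wrapper name and fast path are illustrative, not part of the changeset:

    /* Hypothetical fast-path wrapper: serve bytes from the prefetched buffer
       and fall back to the Slow variant (which calls iemOpcodeFetchMoreBytes
       and raises #GP/#PF as needed) only when the buffer runs dry. */
    DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8Sketch(PVMCPUCC pVCpu, uint8_t *pb)
    {
        uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
        if (RT_LIKELY(offOpcode < pVCpu->iem.s.cbOpcode))
        {
            *pb = pVCpu->iem.s.abOpcode[offOpcode];
            pVCpu->iem.s.offOpcode = offOpcode + 1;
            return VINF_SUCCESS;
        }
        return iemOpcodeGetNextU8Slow(pVCpu, pb);
    }
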
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp

    r108243 r108244  
    25 25  * SPDX-License-Identifier: GPL-3.0-only
    26 26  */
    27 
    28 
    29 /** @page pg_iem    IEM - Interpreted Execution Manager
    30  *
    31  * The interpreted execution manager (IEM) is for executing short guest code
    32  * sequences that are causing too many exits / virtualization traps.  It will
    33  * also be used to interpret single instructions, thus replacing the selective
    34  * interpreters in EM and IOM.
    35  *
    36  * Design goals:
    37  *      - Relatively small footprint, although we favour speed and correctness
    38  *        over size.
    39  *      - Reasonably fast.
    40  *      - Correctly handle lock prefixed instructions.
    41  *      - Complete instruction set - eventually.
    42  *      - Refactorable into a recompiler, maybe.
    43  *      - Replace EMInterpret*.
    44  *
    45  * Using the existing disassembler has been considered, however this is thought
    46  * to conflict with speed as the disassembler chews things a bit too much while
    47  * leaving us with a somewhat complicated state to interpret afterwards.
    48  *
    49  *
    50  * The current code is very much work in progress. You've been warned!
    51  *
    52  *
    53  * @section sec_iem_fpu_instr   FPU Instructions
    54  *
    55  * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
    56  * same or equivalent instructions on the host FPU.  To make life easy, we also
    57  * let the FPU prioritize the unmasked exceptions for us.  This, however, only
    58  * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
    59  * for FPU exception delivery, because with CR0.NE=0 there is a window where we
    60  * can trigger spurious FPU exceptions.
    61  *
    62  * The guest FPU state is not loaded into the host CPU and kept there till we
    63  * leave IEM because the calling conventions have declared an all year open
    64  * season on much of the FPU state.  For instance an innocent looking call to
    65  * memcpy might end up using a whole bunch of XMM or MM registers if the
    66  * particular implementation finds it worthwhile.
    67  *
    68  *
    69  * @section sec_iem_logging     Logging
    70  *
    71  * The IEM code uses the \"IEM\" log group for the main logging. The different
    72  * logging levels/flags are generally used for the following purposes:
    73  *      - Level 1  (Log)  : Errors, exceptions, interrupts and such major events.
    74  *      - Flow  (LogFlow) : Basic enter/exit IEM state info.
    75  *      - Level 2  (Log2) : ?
    76  *      - Level 3  (Log3) : More detailed enter/exit IEM state info.
    77  *      - Level 4  (Log4) : Decoding mnemonics w/ EIP.
    78  *      - Level 5  (Log5) : Decoding details.
    79  *      - Level 6  (Log6) : Enables/disables the lockstep comparison with REM.
    80  *      - Level 7  (Log7) : iret++ execution logging.
    81  *      - Level 8  (Log8) :
    82  *      - Level 9  (Log9) :
    83  *      - Level 10 (Log10): TLBs.
    84  *      - Level 11 (Log11): Unmasked FPU exceptions.
    85  *
    86  * The \"IEM_MEM\" log group covers most of memory related details logging,
    87  * except for errors and exceptions:
    88  *      - Level 1  (Log)  : Reads.
    89  *      - Level 2  (Log2) : Read fallbacks.
    90  *      - Level 3  (Log3) : MemMap read.
    91  *      - Level 4  (Log4) : MemMap read fallbacks.
    92  *      - Level 5  (Log5) : Writes.
    93  *      - Level 6  (Log6) : Write fallbacks.
    94  *      - Level 7  (Log7) : MemMap writes and read-writes.
    95  *      - Level 8  (Log8) : MemMap write and read-write fallbacks.
    96  *      - Level 9  (Log9) : Stack reads.
    97  *      - Level 10 (Log10): Stack read fallbacks.
    98  *      - Level 11 (Log11): Stack writes.
    99  *      - Level 12 (Log12): Stack write fallbacks.
    100  *      - Flow  (LogFlow) :
    101  *
    102  * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
    103  *      - Level 1  (Log)  : Errors and other major events.
    104  *      - Flow (LogFlow)  : Misc flow stuff (cleanup?)
    105  *      - Level 2  (Log2) : VM exits.
    106  *
    107  * The syscall logging level assignments:
    108  *      - Level 1: DOS and BIOS.
    109  *      - Level 2: Windows 3.x
    110  *      - Level 3: Linux.
    111  */
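The level assignments above map straight onto the regular VBox/log.h macros. As a minimal, purely illustrative sketch (the function and its messages are assumptions, not code from this changeset), assuming the standard Log/LogFlow/Log4/LogEx macros and the IEM and IEM_MEM log groups described above:

/* Illustration only: how the level assignments above are typically exercised. */
#undef  LOG_GROUP
#define LOG_GROUP LOG_GROUP_IEM
#include <iprt/types.h>
#include <VBox/log.h>

static void iemLogLevelSketch(uint64_t uRip, uint8_t bOpcode)
{
    LogFlow(("iemLogLevelSketch: enter rip=%RX64\n", uRip));                    /* Flow   : enter/exit state info.         */
    Log4(("iemLogLevelSketch: decoding opcode %#x at %RX64\n", bOpcode, uRip)); /* Level 4: decoding mnemonics w/ EIP.     */
    Log(("iemLogLevelSketch: raising #GP(0)\n"));                               /* Level 1: exceptions and major events.   */
    LogEx(LOG_GROUP_IEM_MEM, ("iemLogLevelSketch: guest read\n"));              /* IEM_MEM group: reads are Level 1 there. */
}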
    112 
    113 /* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
    114 #ifdef _MSC_VER
    115 # pragma warning(disable:4505)
    116 #endif
    11727
    11828
     
    12737#include <VBox/vmm/iem.h>
    12838#include <VBox/vmm/cpum.h>
    129 #include <VBox/vmm/pdmapic.h>
    130 #include <VBox/vmm/pdm.h>
    13139#include <VBox/vmm/pgm.h>
    132 #include <VBox/vmm/iom.h>
    133 #include <VBox/vmm/em.h>
    134 #include <VBox/vmm/hm.h>
    135 #include <VBox/vmm/nem.h>
    136 #include <VBox/vmm/gcm.h>
    137 #include <VBox/vmm/gim.h>
    138 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    139 # include <VBox/vmm/em.h>
    140 # include <VBox/vmm/hm_svm.h>
    141 #endif
    142 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    143 # include <VBox/vmm/hmvmxinline.h>
    144 #endif
    145 #include <VBox/vmm/tm.h>
    14640#include <VBox/vmm/dbgf.h>
    147 #include <VBox/vmm/dbgftrace.h>
    14841#include "IEMInternal.h"
    14942#include <VBox/vmm/vmcc.h>
     
    15144#include <VBox/err.h>
    15245#include <VBox/param.h>
    153 #include <VBox/dis.h>
    154 #include <iprt/asm-math.h>
    155 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    156 # include <iprt/asm-amd64-x86.h>
    157 #elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
    158 # include <iprt/asm-arm.h>
    159 #endif
    16046#include <iprt/assert.h>
    16147#include <iprt/string.h>
     
    16450#include "IEMInline.h"
    16551#ifdef VBOX_VMM_TARGET_X86
    166 # include "target-x86/IEMAllTlbInline-x86.h"
     52# include "IEMAllTlbInline-x86.h"
    16753#endif
    16854
    16955
    170 /*********************************************************************************************************************************
    171 *   Global Variables                                                                                                             *
    172 *********************************************************************************************************************************/
    173 #if defined(IEM_LOG_MEMORY_WRITES)
    174 /** What IEM just wrote. */
    175 uint8_t g_abIemWrote[256];
    176 /** How much IEM just wrote. */
    177 size_t g_cbIemWrote;
    178 #endif
    179 
    180 
    181 /**
    182  * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
    183  * path.
    184  *
    185  * This will also invalidate TLB entries for any pages with active data
    186  * breakpoints on them.
    187  *
    188  * @returns IEM_F_BRK_PENDING_XXX or zero.
    189  * @param   pVCpu               The cross context virtual CPU structure of the
    190  *                              calling thread.
    191  *
    192  * @note    Don't call directly, use iemCalcExecDbgFlags instead.
    193  */
    194 uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
    195 {
    196     uint32_t fExec = 0;
    197 
    198     /*
    199      * Helper for invalidating the data TLB for breakpoint addresses.
    200      *
    201      * This is to make sure any access to the page will always trigger a TLB
    202      * load for as long as the breakpoint is enabled.
    203      */
    204 #ifdef IEM_WITH_DATA_TLB
    205 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
    206         RTGCPTR uTagNoRev = (a_uValue); \
    207         uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
    208         /** @todo do large page accounting */ \
    209         uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
    210         if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
    211             pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
    212         if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
    213             pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
    214     } while (0)
    215 #else
    216 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
    217 #endif
    218 
    219     /*
    220      * Process guest breakpoints.
    221      */
    222 #define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
    223         if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
    224         { \
    225             switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
    226             { \
    227                 case X86_DR7_RW_EO: \
    228                     fExec |= IEM_F_PENDING_BRK_INSTR; \
    229                     break; \
    230                 case X86_DR7_RW_WO: \
    231                 case X86_DR7_RW_RW: \
    232                     fExec |= IEM_F_PENDING_BRK_DATA; \
    233                     INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
    234                     break; \
    235                 case X86_DR7_RW_IO: \
    236                     fExec |= IEM_F_PENDING_BRK_X86_IO; \
    237                     break; \
    238             } \
    239         } \
    240     } while (0)
    241 
    242     uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
    243     if (fGstDr7 & X86_DR7_ENABLED_MASK)
    244     {
    245 /** @todo extract more details here to simplify matching later. */
    246 #ifdef IEM_WITH_DATA_TLB
    247         IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
    248 #endif
    249         PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
    250         PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
    251         PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
    252         PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
    253     }
    254 
    255     /*
    256      * Process hypervisor breakpoints.
    257      */
    258     PVMCC const    pVM       = pVCpu->CTX_SUFF(pVM);
    259     uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
    260     if (fHyperDr7 & X86_DR7_ENABLED_MASK)
    261     {
    262 /** @todo extract more details here to simplify matching later. */
    263         PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
    264         PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
    265         PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
    266         PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
    267     }
    268 
    269     return fExec;
    270 }
    271 
    272 
    273 /**
    274  * Initializes the decoder state.
    275  *
    276  * iemReInitDecoder is mostly a copy of this function.
    277  *
    278  * @param   pVCpu               The cross context virtual CPU structure of the
    279  *                              calling thread.
    280  * @param   fExecOpts           Optional execution flags:
    281  *                                  - IEM_F_BYPASS_HANDLERS
    282  *                                  - IEM_F_X86_DISREGARD_LOCK
    283  */
    284 DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
    285 {
    286     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    287     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    288     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    289     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    290     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    291     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    292     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    293     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    294     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    295     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    296 
    297     /* Execution state: */
    298     uint32_t fExec;
    299     pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
    300 
    301     /* Decoder state: */
    302     pVCpu->iem.s.enmDefAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    303     pVCpu->iem.s.enmEffAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;
    304     if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
    305     {
    306         pVCpu->iem.s.enmDefOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    307         pVCpu->iem.s.enmEffOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;
    308     }
    309     else
    310     {
    311         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    312         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    313     }
    314     pVCpu->iem.s.fPrefixes          = 0;
    315     pVCpu->iem.s.uRexReg            = 0;
    316     pVCpu->iem.s.uRexB              = 0;
    317     pVCpu->iem.s.uRexIndex          = 0;
    318     pVCpu->iem.s.idxPrefix          = 0;
    319     pVCpu->iem.s.uVex3rdReg         = 0;
    320     pVCpu->iem.s.uVexLength         = 0;
    321     pVCpu->iem.s.fEvexStuff         = 0;
    322     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
    323 #ifdef IEM_WITH_CODE_TLB
    324     pVCpu->iem.s.pbInstrBuf         = NULL;
    325     pVCpu->iem.s.offInstrNextByte   = 0;
    326     pVCpu->iem.s.offCurInstrStart   = 0;
    327 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    328     pVCpu->iem.s.offOpcode          = 0;
    329 # endif
    330 # ifdef VBOX_STRICT
    331     pVCpu->iem.s.GCPhysInstrBuf     = NIL_RTGCPHYS;
    332     pVCpu->iem.s.cbInstrBuf         = UINT16_MAX;
    333     pVCpu->iem.s.cbInstrBufTotal    = UINT16_MAX;
    334     pVCpu->iem.s.uInstrBufPc        = UINT64_C(0xc0ffc0ffcff0c0ff);
    335 # endif
    336 #else
    337     pVCpu->iem.s.offOpcode          = 0;
    338     pVCpu->iem.s.cbOpcode           = 0;
    339 #endif
    340     pVCpu->iem.s.offModRm           = 0;
    341     pVCpu->iem.s.cActiveMappings    = 0;
    342     pVCpu->iem.s.iNextMapping       = 0;
    343     pVCpu->iem.s.rcPassUp           = VINF_SUCCESS;
    344 
    345 #ifdef DBGFTRACE_ENABLED
    346     switch (IEM_GET_CPU_MODE(pVCpu))
    347     {
    348         case IEMMODE_64BIT:
    349             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
    350             break;
    351         case IEMMODE_32BIT:
    352             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    353             break;
    354         case IEMMODE_16BIT:
    355             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    356             break;
    357     }
    358 #endif
    359 }
    360 
    361 
    362 /**
    363  * Reinitializes the decoder state for the 2nd+ loops of IEMExecLots.
    364  *
    365  * This is mostly a copy of iemInitDecoder.
    366  *
    367  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    368  */
    369 DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
    370 {
    371     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    372     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    373     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    374     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    375     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    376     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    377     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    378     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    379     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    380 
    381     /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
    382     AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
    383               ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
    384 
    385     IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
    386     pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
    387     pVCpu->iem.s.enmEffAddrMode     = enmMode;
    388     if (enmMode != IEMMODE_64BIT)
    389     {
    390         pVCpu->iem.s.enmDefOpSize   = enmMode;  /** @todo check if this is correct... */
    391         pVCpu->iem.s.enmEffOpSize   = enmMode;
    392     }
    393     else
    394     {
    395         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    396         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    397     }
    398     pVCpu->iem.s.fPrefixes          = 0;
    399     pVCpu->iem.s.uRexReg            = 0;
    400     pVCpu->iem.s.uRexB              = 0;
    401     pVCpu->iem.s.uRexIndex          = 0;
    402     pVCpu->iem.s.idxPrefix          = 0;
    403     pVCpu->iem.s.uVex3rdReg         = 0;
    404     pVCpu->iem.s.uVexLength         = 0;
    405     pVCpu->iem.s.fEvexStuff         = 0;
    406     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
    407 #ifdef IEM_WITH_CODE_TLB
    408     if (pVCpu->iem.s.pbInstrBuf)
    409     {
    410         uint64_t off = (enmMode == IEMMODE_64BIT
    411                         ? pVCpu->cpum.GstCtx.rip
    412                         : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
    413                      - pVCpu->iem.s.uInstrBufPc;
    414         if (off < pVCpu->iem.s.cbInstrBufTotal)
    415         {
    416             pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
    417             pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
    418             if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
    419                 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
    420             else
    421                 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
    422         }
    423         else
    424         {
    425             pVCpu->iem.s.pbInstrBuf       = NULL;
    426             pVCpu->iem.s.offInstrNextByte = 0;
    427             pVCpu->iem.s.offCurInstrStart = 0;
    428             pVCpu->iem.s.cbInstrBuf       = 0;
    429             pVCpu->iem.s.cbInstrBufTotal  = 0;
    430             pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    431         }
    432     }
    433     else
    434     {
    435         pVCpu->iem.s.offInstrNextByte = 0;
    436         pVCpu->iem.s.offCurInstrStart = 0;
    437         pVCpu->iem.s.cbInstrBuf       = 0;
    438         pVCpu->iem.s.cbInstrBufTotal  = 0;
    439 # ifdef VBOX_STRICT
    440         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    441 # endif
    442     }
    443 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    444     pVCpu->iem.s.offOpcode          = 0;
    445 # endif
    446 #else  /* !IEM_WITH_CODE_TLB */
    447     pVCpu->iem.s.cbOpcode           = 0;
    448     pVCpu->iem.s.offOpcode          = 0;
    449 #endif /* !IEM_WITH_CODE_TLB */
    450     pVCpu->iem.s.offModRm           = 0;
    451     Assert(pVCpu->iem.s.cActiveMappings == 0);
    452     pVCpu->iem.s.iNextMapping       = 0;
    453     Assert(pVCpu->iem.s.rcPassUp   == VINF_SUCCESS);
    454     Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
    455 
    456 #ifdef DBGFTRACE_ENABLED
    457     switch (enmMode)
    458     {
    459         case IEMMODE_64BIT:
    460             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
    461             break;
    462         case IEMMODE_32BIT:
    463             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    464             break;
    465         case IEMMODE_16BIT:
    466             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    467             break;
    468     }
    469 #endif
    470 }
    471 
    472 
    473 
     56#ifndef IEM_WITH_CODE_TLB
    47457/**
    47558 * Prefetch opcodes the first time when starting executing.
    47659 *
    47760 * @returns Strict VBox status code.
    478  * @param   pVCpu               The cross context virtual CPU structure of the
    479  *                              calling thread.
    480  * @param   fExecOpts           Optional execution flags:
    481  *                                  - IEM_F_BYPASS_HANDLERS
    482  *                                  - IEM_F_X86_DISREGARD_LOCK
    483  */
    484 static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
    485 {
    486     iemInitDecoder(pVCpu, fExecOpts);
    487 
    488 #ifndef IEM_WITH_CODE_TLB
     61 * @param   pVCpu   The cross context virtual CPU structure of the calling
     62 *                  thread.
     63 */
     64VBOXSTRICTRC iemOpcodeFetchPrefetch(PVMCPUCC pVCpu) RT_NOEXCEPT
     65{
    48966    /*
    49067     * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
     
    616193    }
    617194    pVCpu->iem.s.cbOpcode = cbToTryRead;
     195    return VINF_SUCCESS;
     196}
    618197#endif /* !IEM_WITH_CODE_TLB */
    619     return VINF_SUCCESS;
    620 }
    621 
    622 
    623 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    624 /**
    625  * Worker for iemTlbInvalidateAll.
    626  */
    627 template<bool a_fGlobal>
    628 DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
    629 {
    630     if (!a_fGlobal)
    631         pTlb->cTlsFlushes++;
    632     else
    633         pTlb->cTlsGlobalFlushes++;
    634 
    635     pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
    636     if (RT_LIKELY(pTlb->uTlbRevision != 0))
    637     { /* very likely */ }
    638     else
    639     {
    640         pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
    641         pTlb->cTlbRevisionRollovers++;
    642         unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
    643         while (i-- > 0)
    644             pTlb->aEntries[i * 2].uTag = 0;
    645     }
    646 
    647     pTlb->cTlbNonGlobalLargePageCurLoads    = 0;
    648     pTlb->NonGlobalLargePageRange.uLastTag  = 0;
    649     pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
    650 
    651     if (a_fGlobal)
    652     {
    653         pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
    654         if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
    655         { /* very likely */ }
    656         else
    657         {
    658             pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
    659             pTlb->cTlbRevisionRollovers++;
    660             unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
    661             while (i-- > 0)
    662                 pTlb->aEntries[i * 2 + 1].uTag = 0;
    663         }
    664 
    665         pTlb->cTlbGlobalLargePageCurLoads    = 0;
    666         pTlb->GlobalLargePageRange.uLastTag  = 0;
    667         pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
    668     }
    669 }
    670 #endif
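What makes the flush above O(1) is the revision trick: a TLB entry only counts as a hit when its tag carries the current revision, so bumping uTlbRevision silently invalidates every entry and only the rare rollover pays for a sweep of the array. A self-contained sketch of that idea, with simplified types and an assumed 36-bit tag / upper-bits revision split rather than the real IEMTLB layout:

#include <cstdint>

/* Simplified stand-in for a revision-tagged TLB; sizes and bit split are assumptions. */
static constexpr uint64_t kRevisionIncr = UINT64_C(1) << 36;   /* revision lives above the 36 tag bits */

struct TinyTlbEntry { uint64_t uTag; };                        /* page tag ORed with the revision */
struct TinyTlb
{
    uint64_t     uRevision = kRevisionIncr;
    TinyTlbEntry aEntries[256] = {};
};

static bool tinyTlbIsHit(TinyTlb const &Tlb, uint64_t uPageTag, unsigned idx)
{
    /* A stale entry still carries an older revision, so the compare fails without clearing it. */
    return Tlb.aEntries[idx].uTag == (uPageTag | Tlb.uRevision);
}

static void tinyTlbFlushAll(TinyTlb &Tlb)
{
    Tlb.uRevision += kRevisionIncr;                /* common case: O(1) flush               */
    if (Tlb.uRevision == 0)                        /* rollover: old tags could match again, */
    {                                              /* so sweep once, like the worker above  */
        Tlb.uRevision = kRevisionIncr;
        for (TinyTlbEntry &Entry : Tlb.aEntries)
            Entry.uTag = 0;
    }
}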
    671 
    672 
    673 /**
    674  * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
    675  */
    676 template<bool a_fGlobal>
    677 DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
    678 {
    679 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    680     Log10(("IEMTlbInvalidateAll\n"));
    681 
    682 # ifdef IEM_WITH_CODE_TLB
    683     pVCpu->iem.s.cbInstrBufTotal = 0;
    684     iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
    685     if (a_fGlobal)
    686         IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
    687     else
    688         IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
    689 # endif
    690 
    691 # ifdef IEM_WITH_DATA_TLB
    692     iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
    693     if (a_fGlobal)
    694         IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
    695     else
    696         IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
    697 # endif
    698 #else
    699     RT_NOREF(pVCpu);
    700 #endif
    701 }
    702 
    703 
    704 /**
    705  * Invalidates the non-global IEM TLB entries.
    706  *
    707  * This is called internally as well as by PGM when moving GC mappings.
    708  *
    709  * @param   pVCpu       The cross context virtual CPU structure of the calling
    710  *                      thread.
    711  */
    712 VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
    713 {
    714     iemTlbInvalidateAll<false>(pVCpu);
    715 }
    716 
    717 
    718 /**
    719  * Invalidates all the IEM TLB entries.
    720  *
    721  * This is called internally as well as by PGM when moving GC mappings.
    722  *
    723  * @param   pVCpu       The cross context virtual CPU structure of the calling
    724  *                      thread.
    725  */
    726 VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
    727 {
    728     iemTlbInvalidateAll<true>(pVCpu);
    729 }
    730 
    731 
    732 /**
    733  * Invalidates a page in the TLBs.
    734  *
    735  * @param   pVCpu       The cross context virtual CPU structure of the calling
    736  *                      thread.
    737  * @param   GCPtr       The address of the page to invalidate
    738  * @thread EMT(pVCpu)
    739  */
    740 VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
    741 {
    742     IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
    743 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    744     Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
    745     GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
    746     Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
    747     uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
    748 
    749 # ifdef IEM_WITH_CODE_TLB
    750     iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
    751 # endif
    752 # ifdef IEM_WITH_DATA_TLB
    753     iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
    754 # endif
    755 #else
    756     NOREF(pVCpu); NOREF(GCPtr);
    757 #endif
    758 }
    759 
    760 
    761 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    762 /**
    763  * Invalidates both TLBs in slow fashion following a rollover.
    764  *
    765  * Worker for IEMTlbInvalidateAllPhysical,
    766  * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
    767  * iemMemMapJmp and others.
    768  *
    769  * @thread EMT(pVCpu)
    770  */
    771 void iemTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu) RT_NOEXCEPT
    772 {
    773     Log10(("iemTlbInvalidateAllPhysicalSlow\n"));
    774     ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
    775     ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
    776 
    777     unsigned i;
    778 # ifdef IEM_WITH_CODE_TLB
    779     i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
    780     while (i-- > 0)
    781     {
    782         pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3       = NULL;
    783         pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
    784                                                                | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    785     }
    786     pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
    787     pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    788 # endif
    789 # ifdef IEM_WITH_DATA_TLB
    790     i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
    791     while (i-- > 0)
    792     {
    793         pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3       = NULL;
    794         pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
    795                                                                | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    796     }
    797     pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
    798     pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    799 # endif
    800 
    801 }
    802 #endif
    803 
    804 
    805 /**
    806  * Invalidates the host physical aspects of the IEM TLBs.
    807  *
    808  * This is called internally as well as by PGM when moving GC mappings.
    809  *
    810  * @param   pVCpu       The cross context virtual CPU structure of the calling
    811  *                      thread.
    812  * @note    Currently not used.
    813  */
    814 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
    815 {
    816 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    817     /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
    818     Log10(("IEMTlbInvalidateAllPhysical\n"));
    819 
    820 # ifdef IEM_WITH_CODE_TLB
    821     pVCpu->iem.s.cbInstrBufTotal = 0;
    822 # endif
    823     uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
    824     if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
    825     {
    826         pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
    827         pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    828         pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
    829         pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    830     }
    831     else
    832         iemTlbInvalidateAllPhysicalSlow(pVCpu);
    833 #else
    834     NOREF(pVCpu);
    835 #endif
    836 }
    837 
    838 
    839 /**
    840  * Invalidates the host physical aspects of the IEM TLBs.
    841  *
    842  * This is called internally as well as by PGM when moving GC mappings.
    843  *
    844  * @param   pVM         The cross context VM structure.
    845  * @param   idCpuCaller The ID of the calling EMT if available to the caller,
    846  *                      otherwise NIL_VMCPUID.
    847  * @param   enmReason   The reason we're called.
    848  *
    849  * @remarks Caller holds the PGM lock.
    850  */
    851 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
    852 {
    853 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    854     PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
    855     if (pVCpuCaller)
    856         VMCPU_ASSERT_EMT(pVCpuCaller);
    857     Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
    858 
    859     VMCC_FOR_EACH_VMCPU(pVM)
    860     {
    861 # ifdef IEM_WITH_CODE_TLB
    862         if (pVCpuCaller == pVCpu)
    863             pVCpu->iem.s.cbInstrBufTotal = 0;
    864 # endif
    865 
    866         uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
    867         uint64_t       uTlbPhysRevNew  = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
    868         if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
    869         { /* likely */}
    870         else if (pVCpuCaller != pVCpu)
    871             uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
    872         else
    873         {
    874             iemTlbInvalidateAllPhysicalSlow(pVCpu);
    875             continue;
    876         }
    877         if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
    878             pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    879 
    880         if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
    881             pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    882     }
    883     VMCC_FOR_EACH_VMCPU_END(pVM);
    884 
    885 #else
    886     RT_NOREF(pVM, idCpuCaller, enmReason);
    887 #endif
    888 }
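The physical revision bumped in the two functions above follows the same principle, only per physical page: the current uTlbPhysRev value is folded into each entry's fFlagsAndPhysRev when the entry is loaded, so advancing the revision (or falling back to iemTlbInvalidateAllPhysicalSlow) retires every cached host mapping at once without walking the table on the fast path. A hedged sketch of that validity check, with simplified names and an assumed flag/revision bit split; the real field layout lives in IEMInternal.h:

#include <cstdint>

/* Assumption for the sketch: the low 10 bits hold access flags, the rest the physical revision. */
static constexpr uint64_t kPhysRevMask = ~UINT64_C(0x3ff);

struct TinyPhysTlbEntry
{
    uint64_t fFlagsAndPhysRev;   /* access flags | physical revision captured at load time      */
    uint8_t *pbMappingR3;        /* cached host mapping, only trusted while the revisions match */
};

/* Returns the cached mapping if it is still current, nullptr if the caller must re-resolve it. */
static uint8_t *tinyPhysTlbGetMapping(TinyPhysTlbEntry const &Entry, uint64_t uCurPhysRev)
{
    if ((Entry.fFlagsAndPhysRev & kPhysRevMask) == uCurPhysRev)
        return Entry.pbMappingR3;
    return nullptr;              /* revision moved on since the entry was loaded -> stale */
}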
    889198
    890199
     
    18281137#endif /* IEM_WITH_SETJMP */
    18291138
    1830 
    1831 
    1832 /** @name   Register Access.
    1833  * @{
    1834  */
    1835 
    1836 /**
    1837  * Adds an 8-bit signed jump offset to RIP/EIP/IP.
    1838  *
    1839  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    1840  * segment limit.
    1841  *
    1842  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1843  * @param   cbInstr             Instruction size.
    1844  * @param   offNextInstr        The offset of the next instruction.
    1845  * @param   enmEffOpSize        Effective operand size.
    1846  */
    1847 VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
    1848                                                         IEMMODE enmEffOpSize) RT_NOEXCEPT
    1849 {
    1850     switch (enmEffOpSize)
    1851     {
    1852         case IEMMODE_16BIT:
    1853         {
    1854             uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
    1855             if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
    1856                           || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
    1857                 pVCpu->cpum.GstCtx.rip = uNewIp;
    1858             else
    1859                 return iemRaiseGeneralProtectionFault0(pVCpu);
    1860             break;
    1861         }
    1862 
    1863         case IEMMODE_32BIT:
    1864         {
    1865             Assert(!IEM_IS_64BIT_CODE(pVCpu));
    1866             Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
    1867 
    1868             uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
    1869             if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
    1870                 pVCpu->cpum.GstCtx.rip = uNewEip;
    1871             else
    1872                 return iemRaiseGeneralProtectionFault0(pVCpu);
    1873             break;
    1874         }
    1875 
    1876         case IEMMODE_64BIT:
    1877         {
    1878             Assert(IEM_IS_64BIT_CODE(pVCpu));
    1879 
    1880             uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    1881             if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
    1882                 pVCpu->cpum.GstCtx.rip = uNewRip;
    1883             else
    1884                 return iemRaiseGeneralProtectionFault0(pVCpu);
    1885             break;
    1886         }
    1887 
    1888         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    1889     }
    1890 
    1891 #ifndef IEM_WITH_CODE_TLB
    1892     /* Flush the prefetch buffer. */
    1893     pVCpu->iem.s.cbOpcode = cbInstr;
    1894 #endif
    1895 
    1896     /*
    1897      * Clear RF and finish the instruction (maybe raise #DB).
    1898      */
    1899     return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
    1900 }
    1901 
    1902 
    1903 /**
    1904  * Adds a 16-bit signed jump offset to RIP/EIP/IP.
    1905  *
    1906  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    1907  * segment limit.
    1908  *
    1909  * @returns Strict VBox status code.
    1910  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1911  * @param   cbInstr             Instruction size.
    1912  * @param   offNextInstr        The offset of the next instruction.
    1913  */
    1914 VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
    1915 {
    1916     Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
    1917 
    1918     uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    1919     if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
    1920                   || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
    1921         pVCpu->cpum.GstCtx.rip = uNewIp;
    1922     else
    1923         return iemRaiseGeneralProtectionFault0(pVCpu);
    1924 
    1925 #ifndef IEM_WITH_CODE_TLB
    1926     /* Flush the prefetch buffer. */
    1927     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    1928 #endif
    1929 
    1930     /*
    1931      * Clear RF and finish the instruction (maybe raise #DB).
    1932      */
    1933     return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
    1934 }
    1935 
    1936 
    1937 /**
    1938  * Adds a 32-bit signed jump offset to RIP/EIP/IP.
    1939  *
    1940  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    1941  * segment limit.
    1942  *
    1943  * @returns Strict VBox status code.
    1944  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1945  * @param   cbInstr             Instruction size.
    1946  * @param   offNextInstr        The offset of the next instruction.
    1947  * @param   enmEffOpSize        Effective operand size.
    1948  */
    1949 VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
    1950                                                          IEMMODE enmEffOpSize) RT_NOEXCEPT
    1951 {
    1952     if (enmEffOpSize == IEMMODE_32BIT)
    1953     {
    1954         Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
    1955 
    1956         uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
    1957         if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
    1958             pVCpu->cpum.GstCtx.rip = uNewEip;
    1959         else
    1960             return iemRaiseGeneralProtectionFault0(pVCpu);
    1961     }
    1962     else
    1963     {
    1964         Assert(enmEffOpSize == IEMMODE_64BIT);
    1965 
    1966         uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    1967         if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
    1968             pVCpu->cpum.GstCtx.rip = uNewRip;
    1969         else
    1970             return iemRaiseGeneralProtectionFault0(pVCpu);
    1971     }
    1972 
    1973 #ifndef IEM_WITH_CODE_TLB
    1974     /* Flush the prefetch buffer. */
    1975     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    1976 #endif
    1977 
    1978     /*
    1979      * Clear RF and finish the instruction (maybe raise #DB).
    1980      */
    1981     return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
    1982 }
    1983 
    1984 /** @}  */
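In the 64-bit branches above, the only architectural check on the new RIP is IEM_IS_CANONICAL: with 48-bit virtual addresses an address is canonical when bits 63:47 are a sign extension of bit 47. A minimal sketch of that test, assuming 4-level paging (no LA57):

#include <cstdint>

/* Canonical for 48-bit virtual addresses: bits 63:47 must sign-extend bit 47. */
static bool isCanonical48(uint64_t uAddr)
{
    return (uint64_t)((int64_t)(uAddr << 16) >> 16) == uAddr;
}

/* E.g. 0x00007fffffffffff and 0xffff800000000000 are canonical; 0x0000800000000000 is not. */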
    1985 
    1986 
    1987 /** @name   Memory access.
    1988  *
    1989  * @{
    1990  */
    1991 
    1992 #undef  LOG_GROUP
    1993 #define LOG_GROUP LOG_GROUP_IEM_MEM
    1994 
    1995 #if 0 /*unused*/
    1996 /**
    1997  * Looks up a memory mapping entry.
    1998  *
    1999  * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
    2000  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    2001  * @param   pvMem           The memory address.
    2002  * @param   fAccess         The access flags to match (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
    2003  */
    2004 DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
    2005 {
    2006     Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
    2007     fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
    2008     if (   pVCpu->iem.s.aMemMappings[0].pv == pvMem
    2009         && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    2010         return 0;
    2011     if (   pVCpu->iem.s.aMemMappings[1].pv == pvMem
    2012         && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    2013         return 1;
    2014     if (   pVCpu->iem.s.aMemMappings[2].pv == pvMem
    2015         && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    2016         return 2;
    2017     return VERR_NOT_FOUND;
    2018 }
    2019 #endif
    2020 
    2021 /**
    2022  * Finds a free memmap entry when using iNextMapping doesn't work.
    2023  *
    2024  * @returns Memory mapping index, 1024 on failure.
    2025  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2026  */
    2027 static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
    2028 {
    2029     /*
    2030      * The easy case.
    2031      */
    2032     if (pVCpu->iem.s.cActiveMappings == 0)
    2033     {
    2034         pVCpu->iem.s.iNextMapping = 1;
    2035         return 0;
    2036     }
    2037 
    2038     /* There should be enough mappings for all instructions. */
    2039     AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
    2040 
    2041     for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
    2042         if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
    2043             return i;
    2044 
    2045     AssertFailedReturn(1024);
    2046 }
    2047 
    2048 
    2049 /**
    2050  * Commits a bounce buffer that needs writing back and unmaps it.
    2051  *
    2052  * @returns Strict VBox status code.
    2053  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    2054  * @param   iMemMap         The index of the buffer to commit.
    2055  * @param   fPostponeFail   Whether we can postpone writer failures to ring-3.
    2056  *                          Always false in ring-3, obviously.
    2057  */
    2058 static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
    2059 {
    2060     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    2061     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    2062 #ifdef IN_RING3
    2063     Assert(!fPostponeFail);
    2064     RT_NOREF_PV(fPostponeFail);
    2065 #endif
    2066 
    2067     /*
    2068      * Do the writing.
    2069      */
    2070     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2071     if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
    2072     {
    2073         uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    2074         uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    2075         uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    2076         if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    2077         {
    2078             /*
    2079              * Carefully and efficiently dealing with access handler return
    2080              * codes makes this a little bloated.
    2081              */
    2082             VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
    2083                                                  pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    2084                                                  pbBuf,
    2085                                                  cbFirst,
    2086                                                  PGMACCESSORIGIN_IEM);
    2087             if (rcStrict == VINF_SUCCESS)
    2088             {
    2089                 if (cbSecond)
    2090                 {
    2091                     rcStrict = PGMPhysWrite(pVM,
    2092                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    2093                                             pbBuf + cbFirst,
    2094                                             cbSecond,
    2095                                             PGMACCESSORIGIN_IEM);
    2096                     if (rcStrict == VINF_SUCCESS)
    2097                     { /* nothing */ }
    2098                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2099                     {
    2100                         LogEx(LOG_GROUP_IEM,
    2101                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
    2102                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2103                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2104                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2105                     }
    2106 #ifndef IN_RING3
    2107                     else if (fPostponeFail)
    2108                     {
    2109                         LogEx(LOG_GROUP_IEM,
    2110                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    2111                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2112                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2113                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    2114                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    2115                         return iemSetPassUpStatus(pVCpu, rcStrict);
    2116                     }
    2117 #endif
    2118                     else
    2119                     {
    2120                         LogEx(LOG_GROUP_IEM,
    2121                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    2122                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2123                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2124                         return rcStrict;
    2125                     }
    2126                 }
    2127             }
    2128             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2129             {
    2130                 if (!cbSecond)
    2131                 {
    2132                     LogEx(LOG_GROUP_IEM,
    2133                           ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
    2134                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    2135                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2136                 }
    2137                 else
    2138                 {
    2139                     VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
    2140                                                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    2141                                                           pbBuf + cbFirst,
    2142                                                           cbSecond,
    2143                                                           PGMACCESSORIGIN_IEM);
    2144                     if (rcStrict2 == VINF_SUCCESS)
    2145                     {
    2146                         LogEx(LOG_GROUP_IEM,
    2147                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
    2148                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    2149                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    2150                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2151                     }
    2152                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    2153                     {
    2154                         LogEx(LOG_GROUP_IEM,
    2155                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
    2156                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    2157                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    2158                         PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    2159                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2160                     }
    2161 #ifndef IN_RING3
    2162                     else if (fPostponeFail)
    2163                     {
    2164                         LogEx(LOG_GROUP_IEM,
    2165                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    2166                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2167                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2168                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    2169                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    2170                         return iemSetPassUpStatus(pVCpu, rcStrict);
    2171                     }
    2172 #endif
    2173                     else
    2174                     {
    2175                         LogEx(LOG_GROUP_IEM,
    2176                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    2177                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    2178                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    2179                         return rcStrict2;
    2180                     }
    2181                 }
    2182             }
    2183 #ifndef IN_RING3
    2184             else if (fPostponeFail)
    2185             {
    2186                 LogEx(LOG_GROUP_IEM,
    2187                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    2188                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2189                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2190                 if (!cbSecond)
    2191                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
    2192                 else
    2193                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
    2194                 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    2195                 return iemSetPassUpStatus(pVCpu, rcStrict);
    2196             }
    2197 #endif
    2198             else
    2199             {
    2200                 LogEx(LOG_GROUP_IEM,
    2201                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    2202                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    2203                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    2204                 return rcStrict;
    2205             }
    2206         }
    2207         else
    2208         {
    2209             /*
    2210              * No access handlers, much simpler.
    2211              */
    2212             int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
    2213             if (RT_SUCCESS(rc))
    2214             {
    2215                 if (cbSecond)
    2216                 {
    2217                     rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
    2218                     if (RT_SUCCESS(rc))
    2219                     { /* likely */ }
    2220                     else
    2221                     {
    2222                         LogEx(LOG_GROUP_IEM,
    2223                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    2224                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2225                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
    2226                         return rc;
    2227                     }
    2228                 }
    2229             }
    2230             else
    2231             {
    2232                 LogEx(LOG_GROUP_IEM,
    2233                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    2234                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
    2235                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    2236                 return rc;
    2237             }
    2238         }
    2239     }
    2240 
    2241 #if defined(IEM_LOG_MEMORY_WRITES)
    2242     Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    2243           RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
    2244     if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
    2245         Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    2246               RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
    2247               &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
    2248 
    2249     size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    2250     g_cbIemWrote = cbWrote;
    2251     memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
    2252 #endif
    2253 
    2254     /*
    2255      * Free the mapping entry.
    2256      */
    2257     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2258     Assert(pVCpu->iem.s.cActiveMappings != 0);
    2259     pVCpu->iem.s.cActiveMappings--;
    2260     return VINF_SUCCESS;
    2261 }
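Stripped of the access-handler status bookkeeping, the commit above boils down to two writes: cbFirst bytes to GCPhysFirst and the spill-over to GCPhysSecond. A hedged sketch of that core, using the same PGM simple-write API as the no-handler branch (assumes the usual VMM headers are in scope; the handler-aware PGMPhysWrite path, ring-3 postponement and logging are left out):

/* Sketch only: mirrors the "no access handlers" branch of the commit above. */
static int sketchCommitBounceBuffer(PVMCC pVM, RTGCPHYS GCPhysFirst, RTGCPHYS GCPhysSecond,
                                    uint8_t const *pbBuf, uint16_t cbFirst, uint16_t cbSecond)
{
    /* First chunk: the part that fits up to the end of the first page. */
    int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysFirst, pbBuf, cbFirst);
    if (RT_SUCCESS(rc) && cbSecond)
        /* Second chunk: whatever crossed over onto the second physical page. */
        rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysSecond, pbBuf + cbFirst, cbSecond);
    return rc;
}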
    2262 
    2263 
    2264 /**
    2265  * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
    2266  * @todo duplicated
    2267  */
    2268 DECL_FORCE_INLINE(uint32_t)
    2269 iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
    2270 {
    2271     bool const  fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
    2272     if (fAccess & IEM_ACCESS_TYPE_WRITE)
    2273         return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    2274     return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    2275 }
    2276 
    2277 
    2278 /**
    2279  * iemMemMap worker that deals with a request crossing pages.
    2280  */
    2281 VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
    2282                                             size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) RT_NOEXCEPT
    2283 {
    2284     STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
    2285     Assert(cbMem <= GUEST_PAGE_SIZE);
    2286 
    2287     /*
    2288      * Do the address translations.
    2289      */
    2290     uint32_t const cbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    2291     RTGCPHYS GCPhysFirst;
    2292     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
    2293     if (rcStrict != VINF_SUCCESS)
    2294         return rcStrict;
    2295     Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
    2296 
    2297     uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
    2298     RTGCPHYS GCPhysSecond;
    2299     rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
    2300                                                  cbSecondPage, fAccess, &GCPhysSecond);
    2301     if (rcStrict != VINF_SUCCESS)
    2302         return rcStrict;
    2303     Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
    2304     GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
    2305 
    2306     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2307 
    2308     /*
    2309      * Check for data breakpoints.
    2310      */
    2311     if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
    2312     { /* likely */ }
    2313     else
    2314     {
    2315         uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
    2316         fDataBps         |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
    2317                                                       cbSecondPage, fAccess);
    2318         pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
    2319         if (fDataBps > 1)
    2320             LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
    2321                                   fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    2322     }
    2323 
    2324     /*
    2325      * Read in the current memory content if it's a read, execute or partial
    2326      * write access.
    2327      */
    2328     uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    2329 
    2330     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    2331     {
    2332         if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    2333         {
    2334             /*
    2335              * Must carefully deal with access handler status codes here,
    2336              * makes the code a bit bloated.
    2337              */
    2338             rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
    2339             if (rcStrict == VINF_SUCCESS)
    2340             {
    2341                 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    2342                 if (rcStrict == VINF_SUCCESS)
    2343                 { /*likely */ }
    2344                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2345                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2346                 else
    2347                 {
    2348                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
    2349                                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    2350                     return rcStrict;
    2351                 }
    2352             }
    2353             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2354             {
    2355                 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    2356                 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    2357                 {
    2358                     PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    2359                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2360                 }
    2361                 else
    2362                 {
    2363                     LogEx(LOG_GROUP_IEM,
    2364                           ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
    2365                            GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
    2366                     return rcStrict2;
    2367                 }
    2368             }
    2369             else
    2370             {
    2371                 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    2372                                       GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    2373                 return rcStrict;
    2374             }
    2375         }
    2376         else
    2377         {
    2378             /*
    2379              * No informational status codes here, much more straightforward.
    2380              */
    2381             int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
    2382             if (RT_SUCCESS(rc))
    2383             {
    2384                 Assert(rc == VINF_SUCCESS);
    2385                 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
    2386                 if (RT_SUCCESS(rc))
    2387                     Assert(rc == VINF_SUCCESS);
    2388                 else
    2389                 {
    2390                     LogEx(LOG_GROUP_IEM,
    2391                           ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
    2392                           ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
    2393                 }
    2394             }
    2395             else
    2396             {
    2397                 LogEx(LOG_GROUP_IEM,
    2398                       ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
    2399                 return rc;
    2400             }
    2401         }
    2402     }
    2403 #ifdef VBOX_STRICT
    2404     else
    2405         memset(pbBuf, 0xcc, cbMem);
    2406     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    2407         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    2408 #endif
    2409     AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
    2410 
    2411     /*
    2412      * Commit the bounce buffer entry.
    2413      */
    2414     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    2415     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = GCPhysSecond;
    2416     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbFirstPage;
    2417     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = (uint16_t)cbSecondPage;
    2418     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = false;
    2419     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    2420     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    2421     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    2422     pVCpu->iem.s.cActiveMappings++;
    2423 
    2424     *ppvMem = pbBuf;
    2425     *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    2426     return VINF_SUCCESS;
    2427 }
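
A minimal standalone sketch of the page-split arithmetic used above, assuming a 4 KiB guest page; the EX_* names and the sample address are illustrative stand-ins, not part of the IEM code:

    #include <stdint.h>
    #include <stdio.h>

    #define EX_PAGE_SIZE        0x1000u               /* assumed GUEST_PAGE_SIZE */
    #define EX_PAGE_OFFSET_MASK (EX_PAGE_SIZE - 1u)

    int main(void)
    {
        uint64_t const GCPtrFirst = 0x7fffffffeffbull; /* hypothetical address, 5 bytes before a page end */
        size_t   const cbMem      = 8;                 /* the access spills into the next page */

        /* Same split as iemMemBounceBufferMapCrossPage: what fits on the first
           page goes there, the remainder lands on the second page. */
        uint32_t const cbFirstPage  = EX_PAGE_SIZE - (uint32_t)(GCPtrFirst & EX_PAGE_OFFSET_MASK);
        uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;

        printf("cbFirstPage=%u cbSecondPage=%u\n", cbFirstPage, cbSecondPage); /* prints 5 and 3 */
        return 0;
    }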
    2428 
    2429 
    2430 /**
    2431  * iemMemMap worker that deals with iemMemPageMap failures.
    2432  */
    2433 VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
    2434                                        RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) RT_NOEXCEPT
    2435 {
    2436     STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
    2437 
    2438     /*
    2439      * Filter out conditions we can handle and the ones which shouldn't happen.
    2440      */
    2441     if (   rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
    2442         && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
    2443         && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
    2444     {
    2445         AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
    2446         return rcMap;
    2447     }
    2448     pVCpu->iem.s.cPotentialExits++;
    2449 
    2450     /*
    2451      * Read in the current memory content if it's a read, execute or partial
    2452      * write access.
    2453      */
    2454     uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    2455     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    2456     {
    2457         if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
    2458             memset(pbBuf, 0xff, cbMem);
    2459         else
    2460         {
    2461             int rc;
    2462             if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    2463             {
    2464                 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
    2465                 if (rcStrict == VINF_SUCCESS)
    2466                 { /* nothing */ }
    2467                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2468                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2469                 else
    2470                 {
    2471                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    2472                                           GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    2473                     return rcStrict;
    2474                 }
    2475             }
    2476             else
    2477             {
    2478                 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
    2479                 if (RT_SUCCESS(rc))
    2480                 { /* likely */ }
    2481                 else
    2482                 {
    2483                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
    2484                                           GCPhysFirst, rc));
    2485                     return rc;
    2486                 }
    2487             }
    2488         }
    2489     }
    2490 #ifdef VBOX_STRICT
    2491     else
    2492         memset(pbBuf, 0xcc, cbMem);
    2495     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    2496         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    2497 #endif
    2498 
    2499     /*
    2500      * Commit the bounce buffer entry.
    2501      */
    2502     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    2503     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = NIL_RTGCPHYS;
    2504     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbMem;
    2505     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = 0;
    2506     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
    2507     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    2508     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    2509     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    2510     pVCpu->iem.s.cActiveMappings++;
    2511 
    2512     *ppvMem = pbBuf;
    2513     *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    2514     return VINF_SUCCESS;
    2515 }
    2516 
    2517 
    2518 
    2519 /**
    2520  * Commits the guest memory if bounce buffered and unmaps it.
    2521  *
    2522  * @returns Strict VBox status code.
    2523  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2524  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    2525  */
    2526 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    2527 {
    2528     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    2529     AssertMsgReturn(   (bUnmapInfo & 0x08)
    2530                     && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    2531                     && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
    2532                     ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
    2533                     VERR_NOT_FOUND);
    2534 
    2535     /* If it's bounce buffered, we may need to write back the buffer. */
    2536     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    2537     {
    2538         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    2539             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    2540     }
    2541     /* Otherwise unlock it. */
    2542     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    2543         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    2544 
    2545     /* Free the entry. */
    2546     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2547     Assert(pVCpu->iem.s.cActiveMappings != 0);
    2548     pVCpu->iem.s.cActiveMappings--;
    2549     return VINF_SUCCESS;
    2550 }
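
A hedged, self-contained sketch of the bUnmapInfo encoding used by the map/unmap pair above: bits 0-2 carry the mapping slot, bit 3 marks the info as valid, and the access-type bits are stored from bit 4 up. The EX_* constants are illustrative stand-ins, not the real IEM_ACCESS_* values:

    #include <assert.h>
    #include <stdint.h>

    #define EX_ACCESS_TYPE_READ   0x1u
    #define EX_ACCESS_TYPE_WRITE  0x2u
    #define EX_ACCESS_TYPE_MASK   0x7u

    /* Pack, mirroring "iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4)". */
    static uint8_t exPackUnmapInfo(unsigned iMemMap, uint32_t fAccess)
    {
        return (uint8_t)(iMemMap | 0x08 | ((fAccess & EX_ACCESS_TYPE_MASK) << 4));
    }

    int main(void)
    {
        uint8_t const bUnmapInfo = exPackUnmapInfo(2, EX_ACCESS_TYPE_READ | EX_ACCESS_TYPE_WRITE);

        unsigned const iMemMap = bUnmapInfo & 0x7;   /* mapping slot, here 2 */
        assert(bUnmapInfo & 0x08);                   /* distinguishes valid info from zero */
        unsigned const fType   = bUnmapInfo >> 4;    /* access type, here READ | WRITE */

        assert(iMemMap == 2 && fType == (EX_ACCESS_TYPE_READ | EX_ACCESS_TYPE_WRITE));
        return 0;
    }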
    2551 
    2552 
    2553 /**
    2554  * Rolls back the guest memory (conceptually only) and unmaps it.
    2555  *
    2556  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2557  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    2558  */
    2559 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    2560 {
    2561     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    2562     AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
    2563                         && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    2564                         &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    2565                            == ((unsigned)bUnmapInfo >> 4),
    2566                         ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    2567 
    2568     /* Unlock it if necessary. */
    2569     if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    2570         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    2571 
    2572     /* Free the entry. */
    2573     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2574     Assert(pVCpu->iem.s.cActiveMappings != 0);
    2575     pVCpu->iem.s.cActiveMappings--;
    2576 }
    2577 
    2578 #ifdef IEM_WITH_SETJMP
    2579 
    2580 /**
    2581  * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
    2582  *
    2583  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2584  * @param   bUnmapInfo          Unmap info set by iemMemMap; identifies the
    2585  *                              mapping slot and access type.
    2586  */
    2587 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    2588 {
    2589     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    2590     AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
    2591                         && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    2592                         &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    2593                            == ((unsigned)bUnmapInfo >> 4),
    2594                         ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    2595 
    2596     /* If it's bounce buffered, we may need to write back the buffer. */
    2597     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    2598     {
    2599         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    2600         {
    2601             VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    2602             if (rcStrict == VINF_SUCCESS)
    2603                 return;
    2604             IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    2605         }
    2606     }
    2607     /* Otherwise unlock it. */
    2608     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    2609         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    2610 
    2611     /* Free the entry. */
    2612     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2613     Assert(pVCpu->iem.s.cActiveMappings != 0);
    2614     pVCpu->iem.s.cActiveMappings--;
    2615 }
    2616 
    2617 
    2618 /** Fallback for iemMemCommitAndUnmapRwJmp.  */
    2619 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    2620 {
    2621     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    2622     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    2623 }
    2624 
    2625 
    2626 /** Fallback for iemMemCommitAndUnmapAtJmp.  */
    2627 void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    2628 {
    2629     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    2630     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    2631 }
    2632 
    2633 
    2634 /** Fallback for iemMemCommitAndUnmapWoJmp.  */
    2635 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    2636 {
    2637     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    2638     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    2639 }
    2640 
    2641 
    2642 /** Fallback for iemMemCommitAndUnmapRoJmp.  */
    2643 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    2644 {
    2645     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
    2646     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    2647 }
    2648 
    2649 
    2650 /** Fallback for iemMemRollbackAndUnmapWo.  */
    2651 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    2652 {
    2653     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    2654     iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
    2655 }
    2656 
    2657 #endif /* IEM_WITH_SETJMP */
    2658 
    2659 #ifndef IN_RING3
    2660 /**
    2661  * Commits the guest memory if bounce buffered and unmaps it; if any bounce
    2662  * buffer part runs into trouble, the write is postponed to ring-3 (VMCPU_FF_IEM is set).
    2663  *
    2664  * Allows the instruction to be completed and retired, while the IEM user will
    2665  * return to ring-3 immediately afterwards and do the postponed writes there.
    2666  *
    2667  * @returns VBox status code (no strict statuses).  Caller must check
    2668  *          VMCPU_FF_IEM before repeating string instructions and similar stuff.
    2669  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2670  * @param   bUnmapInfo          Unmap info set by iemMemMap; identifies the
    2671  *                              mapping slot and access type.
    2672  */
    2673 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    2674 {
    2675     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    2676     AssertMsgReturn(   (bUnmapInfo & 0x08)
    2677                     && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    2678                     &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    2679                        == ((unsigned)bUnmapInfo >> 4),
    2680                     ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
    2681                     VERR_NOT_FOUND);
    2682 
    2683     /* If it's bounce buffered, we may need to write back the buffer. */
    2684     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    2685     {
    2686         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    2687             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
    2688     }
    2689     /* Otherwise unlock it. */
    2690     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    2691         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    2692 
    2693     /* Free the entry. */
    2694     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2695     Assert(pVCpu->iem.s.cActiveMappings != 0);
    2696     pVCpu->iem.s.cActiveMappings--;
    2697     return VINF_SUCCESS;
    2698 }
    2699 #endif
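
A hedged sketch of the caller pattern described above for ring-0 string instruction code; the wrapper name and the choice of VINF_EM_RAW_TO_R3 are assumptions for illustration only:

    static VBOXSTRICTRC exCommitStringChunk(PVMCPUCC pVCpu, uint8_t bUnmapInfo)
    {
        VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        /* If part of the write was postponed, VMCPU_FF_IEM is set and the string
           instruction must not be repeated before ring-3 has flushed the data. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
            return VINF_EM_RAW_TO_R3; /* assumed return choice; real callers may differ */
        return VINF_SUCCESS;
    }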
    2700 
    2701 
    2702 /**
    2703  * Rolls back mappings, releasing page locks and such.
    2704  *
    2705  * The caller shall only call this after checking cActiveMappings.
    2706  *
    2707  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    2708  */
    2709 void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
    2710 {
    2711     Assert(pVCpu->iem.s.cActiveMappings > 0);
    2712 
    2713     uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    2714     while (iMemMap-- > 0)
    2715     {
    2716         uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
    2717         if (fAccess != IEM_ACCESS_INVALID)
    2718         {
    2719             AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
    2720             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2721             if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
    2722                 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    2723             AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
    2724                       ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
    2725                        iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
    2726                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
    2727             pVCpu->iem.s.cActiveMappings--;
    2728         }
    2729     }
    2730 }
    2731 
    2732 #undef  LOG_GROUP
    2733 #define LOG_GROUP LOG_GROUP_IEM
    2734 
    2735 /** @} */
    2736 
    2737 
    2738 #ifdef LOG_ENABLED
    2739 /**
    2740  * Logs the current instruction.
    2741  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    2742  * @param   fSameCtx    Set if we have the same context information as the VMM,
    2743  *                      clear if we may have already executed an instruction in
    2744  *                      our debug context. When clear, we assume IEMCPU holds
    2745  *                      valid CPU mode info.
    2746  *
    2747  *                      The @a fSameCtx parameter is now misleading and obsolete.
    2748  * @param   pszFunction The IEM function doing the execution.
    2749  */
    2750 static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
    2751 {
    2752 # ifdef IN_RING3
    2753     if (LogIs2Enabled())
    2754     {
    2755         char     szInstr[256];
    2756         uint32_t cbInstr = 0;
    2757         if (fSameCtx)
    2758             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
    2759                                DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
    2760                                szInstr, sizeof(szInstr), &cbInstr);
    2761         else
    2762         {
    2763             uint32_t fFlags = 0;
    2764             switch (IEM_GET_CPU_MODE(pVCpu))
    2765             {
    2766                 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
    2767                 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
    2768                 case IEMMODE_16BIT:
    2769                     if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
    2770                         fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
    2771                     else
    2772                         fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
    2773                     break;
    2774             }
    2775             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
    2776                                szInstr, sizeof(szInstr), &cbInstr);
    2777         }
    2778 
    2779         PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    2780         Log2(("**** %s fExec=%x\n"
    2781               " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
    2782               " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
    2783               " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
    2784               " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
    2785               " %s\n"
    2786               , pszFunction, pVCpu->iem.s.fExec,
    2787               pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
    2788               pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
    2789               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
    2790               pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
    2791               pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
    2792               szInstr));
    2793 
    2794         /* This stuff sucks atm. as it fills the log with MSRs. */
    2795         //if (LogIs3Enabled())
    2796         //    DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
    2797     }
    2798     else
    2799 # endif
    2800         LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
    2801                  pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
    2802     RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
    2803 }
    2804 #endif /* LOG_ENABLED */
    2805 
    2806 
    2807 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2808 /**
    2809  * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
    2810  * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
    2811  *
    2812  * @returns Modified rcStrict.
    2813  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    2814  * @param   rcStrict    The instruction execution status.
    2815  */
    2816 static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
    2817 {
    2818     Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
    2819     if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
    2820     {
    2821         /* VMX preemption timer takes priority over NMI-window exits. */
    2822         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
    2823         {
    2824             rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
    2825             Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
    2826         }
    2827         /*
    2828          * Check remaining intercepts.
    2829          *
    2830          * NMI-window and Interrupt-window VM-exits.
    2831          * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
    2832          * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
    2833          *
    2834          * See Intel spec. 26.7.6 "NMI-Window Exiting".
    2835          * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
    2836          */
    2837         else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
    2838                  && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    2839                  && !TRPMHasTrap(pVCpu))
    2840         {
    2841             Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
    2842             if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
    2843                 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
    2844             {
    2845                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
    2846                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
    2847             }
    2848             else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
    2849                      && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
    2850             {
    2851                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
    2852                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
    2853             }
    2854         }
    2855     }
    2856     /* TPR-below threshold/APIC write has the highest priority. */
    2857     else  if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
    2858     {
    2859         rcStrict = iemVmxApicWriteEmulation(pVCpu);
    2860         Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    2861         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
    2862     }
    2863     /* MTF takes priority over VMX-preemption timer. */
    2864     else
    2865     {
    2866         rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
    2867         Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    2868         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
    2869     }
    2870     return rcStrict;
    2871 }
    2872 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    2873 
    2874 
    2875 /**
    2876  * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,
    2877  * IEMExecOneBypass and friends.
    2878  *
    2879  * Similar code is found in IEMExecLots.
    2880  *
    2881  * @return  Strict VBox status code.
    2882  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    2883  * @param   fExecuteInhibit     If set, execute the instruction following CLI,
    2884  *                      POP SS and MOV SS,GR.
    2885  * @param   pszFunction The calling function name.
    2886  */
    2887 DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
    2888 {
    2889     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    2890     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    2891     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    2892     RT_NOREF_PV(pszFunction);
    2893 
    2894 #ifdef IEM_WITH_SETJMP
    2895     VBOXSTRICTRC rcStrict;
    2896     IEM_TRY_SETJMP(pVCpu, rcStrict)
    2897     {
    2898         uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    2899         rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    2900     }
    2901     IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    2902     {
    2903         pVCpu->iem.s.cLongJumps++;
    2904     }
    2905     IEM_CATCH_LONGJMP_END(pVCpu);
    2906 #else
    2907     uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    2908     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    2909 #endif
    2910     if (rcStrict == VINF_SUCCESS)
    2911         pVCpu->iem.s.cInstructions++;
    2912     if (pVCpu->iem.s.cActiveMappings > 0)
    2913     {
    2914         Assert(rcStrict != VINF_SUCCESS);
    2915         iemMemRollback(pVCpu);
    2916     }
    2917     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    2918     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    2919     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    2920 
    2921 //#ifdef DEBUG
    2922 //    AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
    2923 //#endif
    2924 
    2925 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2926     /*
    2927      * Perform any VMX nested-guest instruction boundary actions.
    2928      *
    2929      * If any of these causes a VM-exit, we must skip executing the next
    2930      * instruction (would run into stale page tables). A VM-exit makes sure
    2931      * there is no interrupt-inhibition, so that should ensure we don't go
    2932      * to try execute the next instruction. Clearing fExecuteInhibit is
    2933      * problematic because of the setjmp/longjmp clobbering above.
    2934      */
    2935     if (   !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    2936                                      | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
    2937         || rcStrict != VINF_SUCCESS)
    2938     { /* likely */ }
    2939     else
    2940         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    2941 #endif
    2942 
    2943     /* Execute the next instruction as well if a cli, pop ss or
    2944        mov ss, Gr has just completed successfully. */
    2945     if (   fExecuteInhibit
    2946         && rcStrict == VINF_SUCCESS
    2947         && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    2948     {
    2949         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
    2950         if (rcStrict == VINF_SUCCESS)
    2951         {
    2952 #ifdef LOG_ENABLED
    2953             iemLogCurInstr(pVCpu, false, pszFunction);
    2954 #endif
    2955 #ifdef IEM_WITH_SETJMP
    2956             IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
    2957             {
    2958                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    2959                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    2960             }
    2961             IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    2962             {
    2963                 pVCpu->iem.s.cLongJumps++;
    2964             }
    2965             IEM_CATCH_LONGJMP_END(pVCpu);
    2966 #else
    2967             IEM_OPCODE_GET_FIRST_U8(&b);
    2968             rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    2969 #endif
    2970             if (rcStrict == VINF_SUCCESS)
    2971             {
    2972                 pVCpu->iem.s.cInstructions++;
    2973 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2974                 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    2975                                               | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
    2976                 { /* likely */ }
    2977                 else
    2978                     rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    2979 #endif
    2980             }
    2981             if (pVCpu->iem.s.cActiveMappings > 0)
    2982             {
    2983                 Assert(rcStrict != VINF_SUCCESS);
    2984                 iemMemRollback(pVCpu);
    2985             }
    2986             AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    2987             AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    2988             AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    2989         }
    2990         else if (pVCpu->iem.s.cActiveMappings > 0)
    2991             iemMemRollback(pVCpu);
    2992         /** @todo drop this after we bake this change into RIP advancing. */
    2993         CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
    2994     }
    2995 
    2996     /*
    2997      * Return value fiddling, statistics and sanity assertions.
    2998      */
    2999     rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3000 
    3001     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    3002     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    3003     return rcStrict;
    3004 }
    3005 
    3006 
    3007 /**
    3008  * Execute one instruction.
    3009  *
    3010  * @return  Strict VBox status code.
    3011  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    3012  */
    3013 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
    3014 {
    3015     AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
    3016 #ifdef LOG_ENABLED
    3017     iemLogCurInstr(pVCpu, true, "IEMExecOne");
    3018 #endif
    3019 
    3020     /*
    3021      * Do the decoding and emulation.
    3022      */
    3023     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    3024     if (rcStrict == VINF_SUCCESS)
    3025         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
    3026     else if (pVCpu->iem.s.cActiveMappings > 0)
    3027         iemMemRollback(pVCpu);
    3028 
    3029     if (rcStrict != VINF_SUCCESS)
    3030         LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    3031                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    3032     return rcStrict;
    3033 }
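
A hedged usage sketch: a caller on the EMT simply invokes IEMExecOne and forwards any non-VINF_SUCCESS strict status unchanged; the wrapper name is hypothetical:

    static VBOXSTRICTRC exEmulateOneInstruction(PVMCPUCC pVCpu)
    {
        VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
        if (rcStrict != VINF_SUCCESS)
            LogFlow(("exEmulateOneInstruction: rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict; /* informational statuses go straight back to the caller */
    }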
    3034 
    3035 
    3036 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    3037                                                         const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    3038 {
    3039     VBOXSTRICTRC rcStrict;
    3040     if (   cbOpcodeBytes
    3041         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    3042     {
    3043         iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
    3044 #ifdef IEM_WITH_CODE_TLB
    3045         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    3046         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    3047         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    3048         pVCpu->iem.s.offCurInstrStart = 0;
    3049         pVCpu->iem.s.offInstrNextByte = 0;
    3050         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    3051 #else
    3052         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    3053         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    3054 #endif
    3055         rcStrict = VINF_SUCCESS;
    3056     }
    3057     else
    3058         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    3059     if (rcStrict == VINF_SUCCESS)
    3060         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
    3061     else if (pVCpu->iem.s.cActiveMappings > 0)
    3062         iemMemRollback(pVCpu);
    3063 
    3064     return rcStrict;
    3065 }
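
A hedged usage sketch of the prefetched-bytes variant: when the caller already holds the opcode bytes for the current RIP, they can be handed straight to the decoder; if the RIP no longer matches, the function falls back to a normal prefetch, so passing stale data is harmless. The wrapper name is an assumption:

    static VBOXSTRICTRC exEmulateWithCapturedBytes(PVMCPUCC pVCpu, uint64_t GCPtrPC,
                                                   const uint8_t *pabBytes, size_t cbBytes)
    {
        /* GCPtrPC must be the guest address the bytes were read from; on a
           mismatch IEMExecOneWithPrefetchedByPC prefetches the opcodes itself. */
        return IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrPC, pabBytes, cbBytes);
    }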
    3066 
    3067 
    3068 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)
    3069 {
    3070     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    3071     if (rcStrict == VINF_SUCCESS)
    3072         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypass");
    3073     else if (pVCpu->iem.s.cActiveMappings > 0)
    3074         iemMemRollback(pVCpu);
    3075 
    3076     return rcStrict;
    3077 }
    3078 
    3079 
    3080 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    3081                                                               const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    3082 {
    3083     VBOXSTRICTRC rcStrict;
    3084     if (   cbOpcodeBytes
    3085         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    3086     {
    3087         iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
    3088 #ifdef IEM_WITH_CODE_TLB
    3089         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    3090         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    3091         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    3092         pVCpu->iem.s.offCurInstrStart = 0;
    3093         pVCpu->iem.s.offInstrNextByte = 0;
    3094         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    3095 #else
    3096         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    3097         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    3098 #endif
    3099         rcStrict = VINF_SUCCESS;
    3100     }
    3101     else
    3102         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    3103     if (rcStrict == VINF_SUCCESS)
    3104         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
    3105     else if (pVCpu->iem.s.cActiveMappings > 0)
    3106         iemMemRollback(pVCpu);
    3107 
    3108     return rcStrict;
    3109 }
    3110 
    3111 
    3112 /**
    3113  * For handling split cacheline lock operations when the host has split-lock
    3114  * detection enabled.
    3115  *
    3116  * This will cause the interpreter to disregard the lock prefix and implicit
    3117  * locking (xchg).
    3118  *
    3119  * @returns Strict VBox status code.
    3120  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    3121  */
    3122 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
    3123 {
    3124     /*
    3125      * Do the decoding and emulation.
    3126      */
    3127     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
    3128     if (rcStrict == VINF_SUCCESS)
    3129         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
    3130     else if (pVCpu->iem.s.cActiveMappings > 0)
    3131         iemMemRollback(pVCpu);
    3132 
    3133     if (rcStrict != VINF_SUCCESS)
    3134         LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    3135                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    3136     return rcStrict;
    3137 }
    3138 
    3139 
    3140 /**
    3141  * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
    3142  * inject a pending TRPM trap.
    3143  */
    3144 VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
    3145 {
    3146     Assert(TRPMHasTrap(pVCpu));
    3147 
    3148     if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    3149         && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    3150     {
    3151         /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
    3152 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3153         bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
    3154         if (fIntrEnabled)
    3155         {
    3156             if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
    3157                 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    3158             else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    3159                 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
    3160             else
    3161             {
    3162                 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
    3163                 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
    3164             }
    3165         }
    3166 #else
    3167         bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    3168 #endif
    3169         if (fIntrEnabled)
    3170         {
    3171             uint8_t     u8TrapNo;
    3172             TRPMEVENT   enmType;
    3173             uint32_t    uErrCode;
    3174             RTGCPTR     uCr2;
    3175             int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
    3176             AssertRC(rc2);
    3177             Assert(enmType == TRPM_HARDWARE_INT);
    3178             VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
    3179 
    3180             TRPMResetTrap(pVCpu);
    3181 
    3182 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3183             /* Injecting an event may cause a VM-exit. */
    3184             if (   rcStrict != VINF_SUCCESS
    3185                 && rcStrict != VINF_IEM_RAISED_XCPT)
    3186                 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3187 #else
    3188             NOREF(rcStrict);
    3189 #endif
    3190         }
    3191     }
    3192 
    3193     return VINF_SUCCESS;
    3194 }
    3195 
    3196 
    3197 VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
    3198 {
    3199     uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
    3200     AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
    3201     Assert(cMaxInstructions > 0);
    3202 
    3203     /*
    3204      * See if there is an interrupt pending in TRPM, inject it if we can.
    3205      */
    3206     /** @todo What if we are injecting an exception and not an interrupt? Is that
    3207      *        possible here? For now we assert it is indeed only an interrupt. */
    3208     if (!TRPMHasTrap(pVCpu))
    3209     { /* likely */ }
    3210     else
    3211     {
    3212         VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
    3213         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    3214         { /*likely */ }
    3215         else
    3216             return rcStrict;
    3217     }
    3218 
    3219     /*
    3220      * Initial decoder init w/ prefetch, then setup setjmp.
    3221      */
    3222     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    3223     if (rcStrict == VINF_SUCCESS)
    3224     {
    3225 #ifdef IEM_WITH_SETJMP
    3226         pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
    3227         IEM_TRY_SETJMP(pVCpu, rcStrict)
    3228 #endif
    3229         {
    3230             /*
    3231              * The run loop.  We limit ourselves to the caller-specified instruction count (cMaxInstructions).
    3232              */
    3233             uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
    3234             PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    3235             for (;;)
    3236             {
    3237                 /*
    3238                  * Log the state.
    3239                  */
    3240 #ifdef LOG_ENABLED
    3241                 iemLogCurInstr(pVCpu, true, "IEMExecLots");
    3242 #endif
    3243 
    3244                 /*
    3245                  * Do the decoding and emulation.
    3246                  */
    3247                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    3248                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    3249 #ifdef VBOX_STRICT
    3250                 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
    3251 #endif
    3252                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    3253                 {
    3254                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    3255                     pVCpu->iem.s.cInstructions++;
    3256 
    3257 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3258                     /* Perform any VMX nested-guest instruction boundary actions. */
    3259                     uint64_t fCpu = pVCpu->fLocalForcedActions;
    3260                     if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    3261                                   | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
    3262                     { /* likely */ }
    3263                     else
    3264                     {
    3265                         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    3266                         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    3267                             fCpu = pVCpu->fLocalForcedActions;
    3268                         else
    3269                         {
    3270                             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3271                             break;
    3272                         }
    3273                     }
    3274 #endif
    3275                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    3276                     {
    3277 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX
    3278                         uint64_t fCpu = pVCpu->fLocalForcedActions;
    3279 #endif
    3280                         fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    3281                                                       | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    3282                                                       | VMCPU_FF_TLB_FLUSH
    3283                                                       | VMCPU_FF_UNHALT );
    3284 
    3285                         if (RT_LIKELY(   (   !fCpu
    3286                                           || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    3287                                               && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
    3288                                       && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
    3289                         {
    3290                             if (--cMaxInstructionsGccStupidity > 0)
    3291                             {
    3292                                 /* Poll timers every now and then according to the caller's specs. */
    3293                                 if (   (cMaxInstructionsGccStupidity & cPollRate) != 0
    3294                                     || !TMTimerPollBool(pVM, pVCpu))
    3295                                 {
    3296                                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    3297                                     iemReInitDecoder(pVCpu);
    3298                                     continue;
    3299                                 }
    3300                             }
    3301                         }
    3302                     }
    3303                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    3304                 }
    3305                 else if (pVCpu->iem.s.cActiveMappings > 0)
    3306                     iemMemRollback(pVCpu);
    3307                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3308                 break;
    3309             }
    3310         }
    3311 #ifdef IEM_WITH_SETJMP
    3312         IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    3313         {
    3314             if (pVCpu->iem.s.cActiveMappings > 0)
    3315                 iemMemRollback(pVCpu);
    3316 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3317             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3318 # endif
    3319             pVCpu->iem.s.cLongJumps++;
    3320         }
    3321         IEM_CATCH_LONGJMP_END(pVCpu);
    3322 #endif
    3323 
    3324         /*
    3325          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    3326          */
    3327         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    3328         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    3329     }
    3330     else
    3331     {
    3332         if (pVCpu->iem.s.cActiveMappings > 0)
    3333             iemMemRollback(pVCpu);
    3334 
    3335 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3336         /*
    3337          * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
    3338          * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
    3339          */
    3340         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3341 #endif
    3342     }
    3343 
    3344     /*
    3345      * Maybe re-enter raw-mode and log.
    3346      */
    3347     if (rcStrict != VINF_SUCCESS)
    3348         LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    3349                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    3350     if (pcInstructions)
    3351         *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
    3352     return rcStrict;
    3353 }
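
A small standalone sketch of the cPollRate convention asserted at the top of IEMExecLots: the rate must be 2^n - 1 so the AND in the run loop acts as a cheap "every N instructions" test. The values below are illustrative:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t const cPollRate = 0x1ff;                /* poll roughly every 512 instructions */
        assert(((cPollRate + 1) & cPollRate) == 0);      /* mirrors RT_IS_POWER_OF_TWO(cPollRate + 1) */

        unsigned cPolls = 0;
        for (uint32_t cLeft = 4096; cLeft > 0; cLeft--)
            if ((cLeft & cPollRate) == 0)                /* the run loop polls TM when this hits zero */
                cPolls++;

        printf("polled %u times over 4096 instructions\n", cPolls); /* prints 8 */
        return 0;
    }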
    3354 
    3355 
    3356 /**
    3357  * Interface used by EMExecuteExec, does exit statistics and limits.
    3358  *
    3359  * @returns Strict VBox status code.
    3360  * @param   pVCpu               The cross context virtual CPU structure.
    3361  * @param   fWillExit           To be defined.
    3362  * @param   cMinInstructions    Minimum number of instructions to execute before checking for FFs.
    3363  * @param   cMaxInstructions    Maximum number of instructions to execute.
    3364  * @param   cMaxInstructionsWithoutExits
    3365  *                              The max number of instructions without exits.
    3366  * @param   pStats              Where to return statistics.
    3367  */
    3368 VMM_INT_DECL(VBOXSTRICTRC)
    3369 IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
    3370                 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
    3371 {
    3372     NOREF(fWillExit); /** @todo define flexible exit crits */
    3373 
    3374     /*
    3375      * Initialize return stats.
    3376      */
    3377     pStats->cInstructions    = 0;
    3378     pStats->cExits           = 0;
    3379     pStats->cMaxExitDistance = 0;
    3380     pStats->cReserved        = 0;
    3381 
    3382     /*
    3383      * Initial decoder init w/ prefetch, then setup setjmp.
    3384      */
    3385     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    3386     if (rcStrict == VINF_SUCCESS)
    3387     {
    3388 #ifdef IEM_WITH_SETJMP
    3389         pVCpu->iem.s.cActiveMappings     = 0; /** @todo wtf?!? */
    3390         IEM_TRY_SETJMP(pVCpu, rcStrict)
    3391 #endif
    3392         {
    3393 #ifdef IN_RING0
    3394             bool const fCheckPreemptionPending   = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    3395 #endif
    3396             uint32_t   cInstructionSinceLastExit = 0;
    3397 
    3398             /*
    3399              * The run loop.  We limit ourselves to the caller-specified instruction limits.
    3400              */
    3401             PVM pVM = pVCpu->CTX_SUFF(pVM);
    3402             for (;;)
    3403             {
    3404                 /*
    3405                  * Log the state.
    3406                  */
    3407 #ifdef LOG_ENABLED
    3408                 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
    3409 #endif
    3410 
    3411                 /*
    3412                  * Do the decoding and emulation.
    3413                  */
    3414                 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
    3415 
    3416                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    3417                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    3418 
    3419                 if (   cPotentialExits != pVCpu->iem.s.cPotentialExits
    3420                     && cInstructionSinceLastExit > 0 /* don't count the first */ )
    3421                 {
    3422                     pStats->cExits += 1;
    3423                     if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
    3424                         pStats->cMaxExitDistance = cInstructionSinceLastExit;
    3425                     cInstructionSinceLastExit = 0;
    3426                 }
    3427 
    3428                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    3429                 {
    3430                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    3431                     pVCpu->iem.s.cInstructions++;
    3432                     pStats->cInstructions++;
    3433                     cInstructionSinceLastExit++;
    3434 
    3435 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3436                     /* Perform any VMX nested-guest instruction boundary actions. */
    3437                     uint64_t fCpu = pVCpu->fLocalForcedActions;
    3438                     if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    3439                                   | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
    3440                     { /* likely */ }
    3441                     else
    3442                     {
    3443                         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    3444                         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    3445                             fCpu = pVCpu->fLocalForcedActions;
    3446                         else
    3447                         {
    3448                             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3449                             break;
    3450                         }
    3451                     }
    3452 #endif
    3453                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    3454                     {
    3455 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX
    3456                         uint64_t fCpu = pVCpu->fLocalForcedActions;
    3457 #endif
    3458                         fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    3459                                                       | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    3460                                                       | VMCPU_FF_TLB_FLUSH
    3461                                                       | VMCPU_FF_UNHALT );
    3462                         if (RT_LIKELY(   (   (   !fCpu
    3463                                               || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    3464                                                   && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
    3465                                           && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
    3466                                       || pStats->cInstructions < cMinInstructions))
    3467                         {
    3468                             if (pStats->cInstructions < cMaxInstructions)
    3469                             {
    3470                                 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
    3471                                 {
    3472 #ifdef IN_RING0
    3473                                     if (   !fCheckPreemptionPending
    3474                                         || !RTThreadPreemptIsPending(NIL_RTTHREAD))
    3475 #endif
    3476                                     {
    3477                                         Assert(pVCpu->iem.s.cActiveMappings == 0);
    3478                                         iemReInitDecoder(pVCpu);
    3479                                         continue;
    3480                                     }
    3481 #ifdef IN_RING0
    3482                                     rcStrict = VINF_EM_RAW_INTERRUPT;
    3483                                     break;
    3484 #endif
    3485                                 }
    3486                             }
    3487                         }
    3488                         Assert(!(fCpu & VMCPU_FF_IEM));
    3489                     }
    3490                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    3491                 }
    3492                 else if (pVCpu->iem.s.cActiveMappings > 0)
     3493                     iemMemRollback(pVCpu);
    3494                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3495                 break;
    3496             }
    3497         }
    3498 #ifdef IEM_WITH_SETJMP
    3499         IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    3500         {
    3501             if (pVCpu->iem.s.cActiveMappings > 0)
    3502                 iemMemRollback(pVCpu);
    3503             pVCpu->iem.s.cLongJumps++;
    3504         }
    3505         IEM_CATCH_LONGJMP_END(pVCpu);
    3506 #endif
    3507 
    3508         /*
    3509          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    3510          */
    3511         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    3512         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    3513     }
    3514     else
    3515     {
    3516         if (pVCpu->iem.s.cActiveMappings > 0)
    3517             iemMemRollback(pVCpu);
    3518 
    3519 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3520         /*
     3521          * If a nested-guest triggers an exception intercept (e.g. #PF) while fetching
     3522          * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
    3523          */
    3524         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    3525 #endif
    3526     }
    3527 
    3528     /*
     3529      * Log when we are returning anything other than VINF_SUCCESS.
    3530      */
    3531     if (rcStrict != VINF_SUCCESS)
    3532         LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
    3533                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
    3534                  pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
    3535     return rcStrict;
    3536 }
    3537 
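/*
 * A minimal usage sketch (hypothetical helper, illustrative values): running a
 * burst of instructions through IEMExecForExits and logging the returned
 * statistics.  fWillExit is passed as zero since its meaning is still to be
 * defined (see the @todo above), and the instruction limits are made up.
 */
static VBOXSTRICTRC exampleIemRunBurst(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 16 /*cMinInstructions*/,
                                            1024 /*cMaxInstructions*/, 64 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    LogFlow(("exampleIemRunBurst: %Rrc ins=%u exits=%u maxdist=%u\n",
             VBOXSTRICTRC_VAL(rcStrict), Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}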
    3538 
    3539 /**
    3540  * Injects a trap, fault, abort, software interrupt or external interrupt.
    3541  *
    3542  * The parameter list matches TRPMQueryTrapAll pretty closely.
    3543  *
    3544  * @returns Strict VBox status code.
    3545  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    3546  * @param   u8TrapNo            The trap number.
    3547  * @param   enmType             What type is it (trap/fault/abort), software
    3548  *                              interrupt or hardware interrupt.
    3549  * @param   uErrCode            The error code if applicable.
    3550  * @param   uCr2                The CR2 value if applicable.
    3551  * @param   cbInstr             The instruction length (only relevant for
    3552  *                              software interrupts).
    3553  * @note    x86 specific, but difficult to move due to iemInitDecoder dep.
    3554  */
    3555 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
    3556                                          uint8_t cbInstr)
    3557 {
    3558     iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
    3559 #ifdef DBGFTRACE_ENABLED
    3560     RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
    3561                       u8TrapNo, enmType, uErrCode, uCr2);
    3562 #endif
    3563 
    3564     uint32_t fFlags;
    3565     switch (enmType)
    3566     {
    3567         case TRPM_HARDWARE_INT:
    3568             Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
    3569             fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
    3570             uErrCode = uCr2 = 0;
    3571             break;
    3572 
    3573         case TRPM_SOFTWARE_INT:
    3574             Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
    3575             fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
    3576             uErrCode = uCr2 = 0;
    3577             break;
    3578 
    3579         case TRPM_TRAP:
    3580         case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
    3581             Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
    3582             fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
    3583             if (u8TrapNo == X86_XCPT_PF)
    3584                 fFlags |= IEM_XCPT_FLAGS_CR2;
    3585             switch (u8TrapNo)
    3586             {
    3587                 case X86_XCPT_DF:
    3588                 case X86_XCPT_TS:
    3589                 case X86_XCPT_NP:
    3590                 case X86_XCPT_SS:
    3591                 case X86_XCPT_PF:
    3592                 case X86_XCPT_AC:
    3593                 case X86_XCPT_GP:
    3594                     fFlags |= IEM_XCPT_FLAGS_ERR;
    3595                     break;
    3596             }
    3597             break;
    3598 
    3599         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    3600     }
    3601 
    3602     VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
    3603 
    3604     if (pVCpu->iem.s.cActiveMappings > 0)
    3605         iemMemRollback(pVCpu);
    3606 
    3607     return rcStrict;
    3608 }
    3609 
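/*
 * A minimal usage sketch (hypothetical helper): injecting a guest page fault
 * through IEMInjectTrap.  The error code assumes a write access for the sake
 * of the example, and cbInstr is zero because the instruction length is only
 * relevant for software interrupts.
 */
static VBOXSTRICTRC exampleInjectGuestPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, X86_TRAP_PF_RW /*uErrCode*/,
                         GCPtrFault /*uCr2*/, 0 /*cbInstr*/);
}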
    3610 
    3611 /**
    3612  * Injects the active TRPM event.
    3613  *
    3614  * @returns Strict VBox status code.
    3615  * @param   pVCpu               The cross context virtual CPU structure.
    3616  */
    3617 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
    3618 {
    3619 #ifndef IEM_IMPLEMENTS_TASKSWITCH
    3620     IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
    3621 #else
    3622     uint8_t     u8TrapNo;
    3623     TRPMEVENT   enmType;
    3624     uint32_t    uErrCode;
    3625     RTGCUINTPTR uCr2;
    3626     uint8_t     cbInstr;
    3627     int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
    3628     if (RT_FAILURE(rc))
    3629         return rc;
    3630 
    3631     /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
    3632      *        ICEBP \#DB injection as a special case. */
    3633     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
    3634 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    3635     if (rcStrict == VINF_SVM_VMEXIT)
    3636         rcStrict = VINF_SUCCESS;
    3637 #endif
    3638 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3639     if (rcStrict == VINF_VMX_VMEXIT)
    3640         rcStrict = VINF_SUCCESS;
    3641 #endif
    3642     /** @todo Are there any other codes that imply the event was successfully
    3643      *        delivered to the guest? See @bugref{6607}.  */
    3644     if (   rcStrict == VINF_SUCCESS
    3645         || rcStrict == VINF_IEM_RAISED_XCPT)
    3646         TRPMResetTrap(pVCpu);
    3647 
    3648     return rcStrict;
    3649 #endif
    3650 }
    3651 
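/*
 * A minimal usage sketch (hypothetical helper): draining a pending TRPM event
 * through IEM before resuming guest execution.  This assumes TRPMHasTrap()
 * can be used to check whether an event is pending on the calling EMT.
 */
static VBOXSTRICTRC exampleDeliverPendingTrpmEvent(PVMCPUCC pVCpu)
{
    if (!TRPMHasTrap(pVCpu))
        return VINF_SUCCESS;
    return IEMInjectTrpmEvent(pVCpu);
}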
    3652 
    3653 VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
    3654 {
    3655     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    3656     return VERR_NOT_IMPLEMENTED;
    3657 }
    3658 
    3659 
    3660 VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
    3661 {
    3662     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    3663     return VERR_NOT_IMPLEMENTED;
    3664 }
    3665 
    3666 #ifdef IN_RING3
    3667 
    3668 /**
    3669  * Handles the unlikely and probably fatal merge cases.
    3670  *
    3671  * @returns Merged status code.
    3672  * @param   rcStrict        Current EM status code.
    3673  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    3674  *                          with @a rcStrict.
    3675  * @param   iMemMap         The memory mapping index. For error reporting only.
    3676  * @param   pVCpu           The cross context virtual CPU structure of the calling
    3677  *                          thread, for error reporting only.
    3678  */
    3679 DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
    3680                                                           unsigned iMemMap, PVMCPUCC pVCpu)
    3681 {
    3682     if (RT_FAILURE_NP(rcStrict))
    3683         return rcStrict;
    3684 
    3685     if (RT_FAILURE_NP(rcStrictCommit))
    3686         return rcStrictCommit;
    3687 
    3688     if (rcStrict == rcStrictCommit)
    3689         return rcStrictCommit;
    3690 
    3691     AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
    3692                            VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
    3693                            pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
    3694                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
    3695                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    3696     return VERR_IOM_FF_STATUS_IPE;
    3697 }
    3698 
    3699 
    3700 /**
    3701  * Helper for IOMR3ProcessForceFlag.
    3702  *
    3703  * @returns Merged status code.
    3704  * @param   rcStrict        Current EM status code.
    3705  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    3706  *                          with @a rcStrict.
    3707  * @param   iMemMap         The memory mapping index. For error reporting only.
    3708  * @param   pVCpu           The cross context virtual CPU structure of the calling
    3709  *                          thread, for error reporting only.
    3710  */
    3711 DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
    3712 {
    3713     /* Simple. */
    3714     if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
    3715         return rcStrictCommit;
    3716 
    3717     if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
    3718         return rcStrict;
    3719 
    3720     /* EM scheduling status codes. */
    3721     if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
    3722                   && rcStrict <= VINF_EM_LAST))
    3723     {
    3724         if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
    3725                       && rcStrictCommit <= VINF_EM_LAST))
    3726             return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    3727     }
    3728 
    3729     /* Unlikely */
    3730     return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
    3731 }
    3732 
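/*
 * Worked examples of the merge rules above (illustrative only):
 *  - iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RESET, 0, pVCpu) yields
 *    VINF_EM_RESET: the current status carries no information, so the commit
 *    status wins.
 *  - iemR3MergeStatus(VINF_EM_HALT, VINF_SUCCESS, 0, pVCpu) yields
 *    VINF_EM_HALT: the commit went fine, so the current status is kept.
 *  - When both are EM scheduling codes, the numerically smaller one is
 *    returned.
 *  - Any other combination falls back to iemR3MergeStatusSlow.
 */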
    3733 
    3734 /**
    3735  * Called by force-flag handling code when VMCPU_FF_IEM is set.
    3736  *
    3737  * @returns Merge between @a rcStrict and what the commit operation returned.
    3738  * @param   pVM         The cross context VM structure.
    3739  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    3740  * @param   rcStrict    The status code returned by ring-0 or raw-mode.
    3741  */
    3742 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
    3743 {
    3744     /*
    3745      * Reset the pending commit.
    3746      */
    3747     AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
    3748               & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
    3749               ("%#x %#x %#x\n",
    3750                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    3751     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
    3752 
    3753     /*
    3754      * Commit the pending bounce buffers (usually just one).
    3755      */
    3756     unsigned cBufs = 0;
    3757     unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    3758     while (iMemMap-- > 0)
    3759         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
    3760         {
    3761             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    3762             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    3763             Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
    3764 
    3765             uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    3766             uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    3767             uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    3768 
    3769             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
    3770             {
    3771                 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
    3772                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    3773                                                             pbBuf,
    3774                                                             cbFirst,
    3775                                                             PGMACCESSORIGIN_IEM);
    3776                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
    3777                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
    3778                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    3779                      VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
    3780             }
    3781 
    3782             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
    3783             {
    3784                 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
    3785                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    3786                                                             pbBuf + cbFirst,
    3787                                                             cbSecond,
    3788                                                             PGMACCESSORIGIN_IEM);
    3789                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
    3790                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
    3791                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
    3792                      VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
    3793             }
    3794             cBufs++;
    3795             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    3796         }
    3797 
    3798     AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
    3799               ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
    3800                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    3801     pVCpu->iem.s.cActiveMappings = 0;
    3802     return rcStrict;
    3803 }
    3804 
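/*
 * A minimal usage sketch (hypothetical helper): how a ring-3 caller might
 * react to VMCPU_FF_IEM after getting a status code back from ring-0,
 * letting IEM commit its pending bounce-buffer writes before the status is
 * acted upon.
 */
static VBOXSTRICTRC exampleHandleIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
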
    3805 #endif /* IN_RING3 */
    3806 
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMInternal-x86.h

    r108220 r108244  
    41 41  * @{
    42 42  */
       43 
       44 VBOXSTRICTRC iemOpcodeFetchPrefetch(PVMCPUCC pVCpu) RT_NOEXCEPT;
    43 45 
    44 46 