VirtualBox

Changeset 108248 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Feb 17, 2025 12:34:56 AM (3 months ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 167569
Message: VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531

Location: trunk/src/VBox/VMM/VMMAll
Files: 2 edited, 1 copied

  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r108247 r108248  
    399399#endif
    400400}
    401 
    402 
    403 /** @name   Memory access.
    404  *
    405  * @{
    406  */
    407 
    408 #undef  LOG_GROUP
    409 #define LOG_GROUP LOG_GROUP_IEM_MEM
    410 
    411 #if 0 /*unused*/
    412 /**
    413  * Looks up a memory mapping entry.
    414  *
    415  * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
    416  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    417  * @param   pvMem           The memory address.
    418  * @param   fAccess         The access to look up.
    419  */
    420 DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
    421 {
    422     Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
    423     fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
    424     if (   pVCpu->iem.s.aMemMappings[0].pv == pvMem
    425         && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    426         return 0;
    427     if (   pVCpu->iem.s.aMemMappings[1].pv == pvMem
    428         && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    429         return 1;
    430     if (   pVCpu->iem.s.aMemMappings[2].pv == pvMem
    431         && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    432         return 2;
    433     return VERR_NOT_FOUND;
    434 }
    435 #endif
    436 
    437 /**
    438  * Finds a free memmap entry when using iNextMapping doesn't work.
    439  *
    440  * @returns Memory mapping index, 1024 on failure.
    441  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    442  */
    443 static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
    444 {
    445     /*
    446      * The easy case.
    447      */
    448     if (pVCpu->iem.s.cActiveMappings == 0)
    449     {
    450         pVCpu->iem.s.iNextMapping = 1;
    451         return 0;
    452     }
    453 
    454     /* There should be enough mappings for all instructions. */
    455     AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
    456 
    457     for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
    458         if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
    459             return i;
    460 
    461     AssertFailedReturn(1024);
    462 }
    463 
    464 
    465 /**
    466  * Commits a bounce buffer that needs writing back and unmaps it.
    467  *
    468  * @returns Strict VBox status code.
    469  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    470  * @param   iMemMap         The index of the buffer to commit.
    471  * @param   fPostponeFail   Whether we can postpone writer failures to ring-3.
    472  *                          Always false in ring-3, obviously.
    473  */
    474 static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
    475 {
    476     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    477     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    478 #ifdef IN_RING3
    479     Assert(!fPostponeFail);
    480     RT_NOREF_PV(fPostponeFail);
    481 #endif
    482 
    483     /*
    484      * Do the writing.
    485      */
    486     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    487     if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
    488     {
    489         uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    490         uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    491         uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    492         if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    493         {
    494             /*
    495              * Carefully and efficiently dealing with access handler return
    496              * codes make this a little bloated.
    497              */
    498             VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
    499                                                  pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    500                                                  pbBuf,
    501                                                  cbFirst,
    502                                                  PGMACCESSORIGIN_IEM);
    503             if (rcStrict == VINF_SUCCESS)
    504             {
    505                 if (cbSecond)
    506                 {
    507                     rcStrict = PGMPhysWrite(pVM,
    508                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    509                                             pbBuf + cbFirst,
    510                                             cbSecond,
    511                                             PGMACCESSORIGIN_IEM);
    512                     if (rcStrict == VINF_SUCCESS)
    513                     { /* nothing */ }
    514                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    515                     {
    516                         LogEx(LOG_GROUP_IEM,
    517                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
    518                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    519                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    520                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    521                     }
    522 #ifndef IN_RING3
    523                     else if (fPostponeFail)
    524                     {
    525                         LogEx(LOG_GROUP_IEM,
    526                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    527                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    528                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    529                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    530                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    531                         return iemSetPassUpStatus(pVCpu, rcStrict);
    532                     }
    533 #endif
    534                     else
    535                     {
    536                         LogEx(LOG_GROUP_IEM,
    537                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    538                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    539                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    540                         return rcStrict;
    541                     }
    542                 }
    543             }
    544             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    545             {
    546                 if (!cbSecond)
    547                 {
    548                     LogEx(LOG_GROUP_IEM,
    549                           ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
    550                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    551                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    552                 }
    553                 else
    554                 {
    555                     VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
    556                                                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    557                                                           pbBuf + cbFirst,
    558                                                           cbSecond,
    559                                                           PGMACCESSORIGIN_IEM);
    560                     if (rcStrict2 == VINF_SUCCESS)
    561                     {
    562                         LogEx(LOG_GROUP_IEM,
    563                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
    564                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    565                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    566                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    567                     }
    568                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    569                     {
    570                         LogEx(LOG_GROUP_IEM,
    571                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
    572                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    573                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    574                         PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    575                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    576                     }
    577 #ifndef IN_RING3
    578                     else if (fPostponeFail)
    579                     {
    580                         LogEx(LOG_GROUP_IEM,
    581                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    582                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    583                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    584                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    585                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    586                         return iemSetPassUpStatus(pVCpu, rcStrict);
    587                     }
    588 #endif
    589                     else
    590                     {
    591                         LogEx(LOG_GROUP_IEM,
    592                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    593                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    594                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    595                         return rcStrict2;
    596                     }
    597                 }
    598             }
    599 #ifndef IN_RING3
    600             else if (fPostponeFail)
    601             {
    602                 LogEx(LOG_GROUP_IEM,
    603                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    604                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    605                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    606                 if (!cbSecond)
    607                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
    608                 else
    609                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
    610                 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    611                 return iemSetPassUpStatus(pVCpu, rcStrict);
    612             }
    613 #endif
    614             else
    615             {
    616                 LogEx(LOG_GROUP_IEM,
    617                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    618                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    619                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    620                 return rcStrict;
    621             }
    622         }
    623         else
    624         {
    625             /*
    626              * No access handlers, much simpler.
    627              */
    628             int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
    629             if (RT_SUCCESS(rc))
    630             {
    631                 if (cbSecond)
    632                 {
    633                     rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
    634                     if (RT_SUCCESS(rc))
    635                     { /* likely */ }
    636                     else
    637                     {
    638                         LogEx(LOG_GROUP_IEM,
    639                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    640                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    641                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
    642                         return rc;
    643                     }
    644                 }
    645             }
    646             else
    647             {
    648                 LogEx(LOG_GROUP_IEM,
    649                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    650                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
    651                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    652                 return rc;
    653             }
    654         }
    655     }
    656 
    657 #if defined(IEM_LOG_MEMORY_WRITES)
    658     Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    659           RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
    660     if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
    661         Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    662               RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
    663               &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
    664 
    665     size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    666     g_cbIemWrote = cbWrote;
    667     memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
    668 #endif
    669 
    670     /*
    671      * Free the mapping entry.
    672      */
    673     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    674     Assert(pVCpu->iem.s.cActiveMappings != 0);
    675     pVCpu->iem.s.cActiveMappings--;
    676     return VINF_SUCCESS;
    677 }
    678 
    679 
    680 /**
    681  * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
    682  * @todo duplicated
    683  */
    684 DECL_FORCE_INLINE(uint32_t)
    685 iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
    686 {
    687     bool const  fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
    688     if (fAccess & IEM_ACCESS_TYPE_WRITE)
    689         return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    690     return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    691 }
    692 
    693 
    694 /**
    695  * iemMemMap worker that deals with a request crossing pages.
    696  */
    697 VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
    698                                             size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) RT_NOEXCEPT
    699 {
    700     STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
    701     Assert(cbMem <= GUEST_PAGE_SIZE);
    702 
    703     /*
    704      * Do the address translations.
    705      */
    706     uint32_t const cbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    707     RTGCPHYS GCPhysFirst;
    708     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
    709     if (rcStrict != VINF_SUCCESS)
    710         return rcStrict;
    711     Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
    712 
    713     uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
    714     RTGCPHYS GCPhysSecond;
    715     rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
    716                                                  cbSecondPage, fAccess, &GCPhysSecond);
    717     if (rcStrict != VINF_SUCCESS)
    718         return rcStrict;
    719     Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
    720     GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
    721 
    722     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    723 
    724     /*
    725      * Check for data breakpoints.
    726      */
    727     if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
    728     { /* likely */ }
    729     else
    730     {
    731         uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
    732         fDataBps         |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
    733                                                       cbSecondPage, fAccess);
    734         pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
    735         if (fDataBps > 1)
    736             LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
    737                                   fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    738     }
    739 
    740     /*
    741      * Read in the current memory content if it's a read, execute or partial
    742      * write access.
    743      */
    744     uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    745 
    746     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    747     {
    748         if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    749         {
    750             /*
    751              * Must carefully deal with access handler status codes here,
    752              * which makes the code a bit bloated.
    753              */
    754             rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
    755             if (rcStrict == VINF_SUCCESS)
    756             {
    757                 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    758                 if (rcStrict == VINF_SUCCESS)
    759                 { /*likely */ }
    760                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    761                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    762                 else
    763                 {
    764                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
    765                                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    766                     return rcStrict;
    767                 }
    768             }
    769             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    770             {
    771                 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    772                 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    773                 {
    774                     PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    775                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    776                 }
    777                 else
    778                 {
    779                     LogEx(LOG_GROUP_IEM,
    780                           ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
    781                            GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
    782                     return rcStrict2;
    783                 }
    784             }
    785             else
    786             {
    787                 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    788                                       GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    789                 return rcStrict;
    790             }
    791         }
    792         else
    793         {
    794             /*
    795              * No informational status codes here, much more straightforward.
    796              */
    797             int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
    798             if (RT_SUCCESS(rc))
    799             {
    800                 Assert(rc == VINF_SUCCESS);
    801                 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
    802                 if (RT_SUCCESS(rc))
    803                     Assert(rc == VINF_SUCCESS);
    804                 else
    805                 {
    806                     LogEx(LOG_GROUP_IEM,
    807                           ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
    808                     return rc;
    809                 }
    810             }
    811             else
    812             {
    813                 LogEx(LOG_GROUP_IEM,
    814                       ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
    815                 return rc;
    816             }
    817         }
    818     }
    819 #ifdef VBOX_STRICT
    820     else
    821         memset(pbBuf, 0xcc, cbMem);
    822     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    823         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    824 #endif
    825     AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
    826 
    827     /*
    828      * Commit the bounce buffer entry.
    829      */
    830     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    831     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = GCPhysSecond;
    832     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbFirstPage;
    833     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = (uint16_t)cbSecondPage;
    834     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = false;
    835     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    836     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    837     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    838     pVCpu->iem.s.cActiveMappings++;
    839 
    840     *ppvMem = pbBuf;
    841     *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    842     return VINF_SUCCESS;
    843 }
    844 
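A small worked example, purely illustrative and not part of this changeset, of the page-split arithmetic used by iemMemBounceBufferMapCrossPage above; it assumes the usual 4 KiB guest page size, and the address and length are made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* An 8-byte access at guest linear address 0xFFFA straddles a 4 KiB page boundary. */
        uint64_t const GCPtrFirst   = 0xFFFA;                                  /* hypothetical address */
        uint32_t const cbMem        = 8;
        uint32_t const cbFirstPage  = 4096 - (uint32_t)(GCPtrFirst & 0xFFF);   /* 6 bytes on page one  */
        uint32_t const cbSecondPage = cbMem - cbFirstPage;                     /* 2 bytes on page two  */
        printf("cbFirstPage=%u cbSecondPage=%u\n", cbFirstPage, cbSecondPage); /* prints 6 and 2       */
        return 0;
    }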
    845 
    846 /**
    847  * iemMemMap worker that deals with iemMemPageMap failures.
    848  */
    849 VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
    850                                        RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) RT_NOEXCEPT
    851 {
    852     STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
    853 
    854     /*
    855      * Filter out conditions we can handle and the ones which shouldn't happen.
    856      */
    857     if (   rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
    858         && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
    859         && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
    860     {
    861         AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
    862         return rcMap;
    863     }
    864     pVCpu->iem.s.cPotentialExits++;
    865 
    866     /*
    867      * Read in the current memory content if it's a read, execute or partial
    868      * write access.
    869      */
    870     uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    871     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    872     {
    873         if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
    874             memset(pbBuf, 0xff, cbMem);
    875         else
    876         {
    877             int rc;
    878             if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    879             {
    880                 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
    881                 if (rcStrict == VINF_SUCCESS)
    882                 { /* nothing */ }
    883                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    884                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    885                 else
    886                 {
    887                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    888                                           GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    889                     return rcStrict;
    890                 }
    891             }
    892             else
    893             {
    894                 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
    895                 if (RT_SUCCESS(rc))
    896                 { /* likely */ }
    897                 else
    898                 {
    899                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    900                                           GCPhysFirst, rc));
    901                     return rc;
    902                 }
    903             }
    904         }
    905     }
    906 #ifdef VBOX_STRICT
    907     else
    908         memset(pbBuf, 0xcc, cbMem);
    909 #endif
    910 #ifdef VBOX_STRICT
    911     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    912         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    913 #endif
    914 
    915     /*
    916      * Commit the bounce buffer entry.
    917      */
    918     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    919     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = NIL_RTGCPHYS;
    920     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbMem;
    921     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = 0;
    922     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
    923     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    924     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    925     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    926     pVCpu->iem.s.cActiveMappings++;
    927 
    928     *ppvMem = pbBuf;
    929     *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    930     return VINF_SUCCESS;
    931 }
    932 
    933 
    934 
    935 /**
    936  * Commits the guest memory if bounce buffered and unmaps it.
    937  *
    938  * @returns Strict VBox status code.
    939  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    940  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    941  */
    942 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    943 {
    944     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    945     AssertMsgReturn(   (bUnmapInfo & 0x08)
    946                     && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    947                     && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
    948                     ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
    949                     VERR_NOT_FOUND);
    950 
    951     /* If it's bounce buffered, we may need to write back the buffer. */
    952     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    953     {
    954         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    955             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    956     }
    957     /* Otherwise unlock it. */
    958     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    959         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    960 
    961     /* Free the entry. */
    962     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    963     Assert(pVCpu->iem.s.cActiveMappings != 0);
    964     pVCpu->iem.s.cActiveMappings--;
    965     return VINF_SUCCESS;
    966 }
    967 
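As a reading aid for the mapping and unmapping routines above, here is a minimal standalone sketch of how the unmap-info byte appears to be encoded: bits 0-2 carry the mapping index, bit 3 is set as a validity marker, and the access-type flags are shifted into the upper bits. The concrete flag value below is a made-up placeholder, not the real IEM_ACCESS_TYPE_* definition:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch of the bUnmapInfo layout inferred from the code above (an assumption,
       not an official definition): index in bits 0-2, marker bit 3, type in bits 4+. */
    static uint8_t packUnmapInfo(unsigned iMemMap, unsigned fAccessType)
    {
        return (uint8_t)(iMemMap | 0x08 | (fAccessType << 4));
    }

    int main(void)
    {
        uint8_t const bUnmapInfo = packUnmapInfo(2, 0x3 /* placeholder read+write value */);
        assert((bUnmapInfo & 0x7) == 2);   /* mapping index recovered  */
        assert(bUnmapInfo & 0x08);         /* validity marker present  */
        assert((bUnmapInfo >> 4) == 0x3);  /* access type recovered    */
        return 0;
    }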
    968 
    969 /**
    970  * Rolls back the guest memory (conceptually only) and unmaps it.
    971  *
    972  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    973  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    974  */
    975 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    976 {
    977     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    978     AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
    979                         && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    980                         &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    981                            == ((unsigned)bUnmapInfo >> 4),
    982                         ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    983 
    984     /* Unlock it if necessary. */
    985     if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    986         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    987 
    988     /* Free the entry. */
    989     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    990     Assert(pVCpu->iem.s.cActiveMappings != 0);
    991     pVCpu->iem.s.cActiveMappings--;
    992 }
    993 
    994 #ifdef IEM_WITH_SETJMP
    995 
    996 /**
    997  * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
    998  *
    999  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1000  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    1002  */
    1003 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1004 {
    1005     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    1006     AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
    1007                         && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    1008                         &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    1009                            == ((unsigned)bUnmapInfo >> 4),
    1010                         ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    1011 
    1012     /* If it's bounce buffered, we may need to write back the buffer. */
    1013     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    1014     {
    1015         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    1016         {
    1017             VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    1018             if (rcStrict == VINF_SUCCESS)
    1019                 return;
    1020             IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    1021         }
    1022     }
    1023     /* Otherwise unlock it. */
    1024     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    1025         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1026 
    1027     /* Free the entry. */
    1028     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1029     Assert(pVCpu->iem.s.cActiveMappings != 0);
    1030     pVCpu->iem.s.cActiveMappings--;
    1031 }
    1032 
    1033 
    1034 /** Fallback for iemMemCommitAndUnmapRwJmp.  */
    1035 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1036 {
    1037     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    1038     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1039 }
    1040 
    1041 
    1042 /** Fallback for iemMemCommitAndUnmapAtJmp.  */
    1043 void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1044 {
    1045     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    1046     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1047 }
    1048 
    1049 
    1050 /** Fallback for iemMemCommitAndUnmapWoJmp.  */
    1051 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1052 {
    1053     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    1054     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1055 }
    1056 
    1057 
    1058 /** Fallback for iemMemCommitAndUnmapRoJmp.  */
    1059 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1060 {
    1061     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
    1062     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1063 }
    1064 
    1065 
    1066 /** Fallback for iemMemRollbackAndUnmapWo.  */
    1067 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    1068 {
    1069     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    1070     iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
    1071 }
    1072 
    1073 #endif /* IEM_WITH_SETJMP */
    1074 
    1075 #ifndef IN_RING3
    1076 /**
    1077  * Commits the guest memory if bounce buffered and unmaps it, if any bounce
    1078  * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
    1079  *
    1080  * Allows the instruction to be completed and retired, while the IEM user will
    1081  * return to ring-3 immediately afterwards and do the postponed writes there.
    1082  *
    1083  * @returns VBox status code (no strict statuses).  Caller must check
    1084  *          VMCPU_FF_IEM before repeating string instructions and similar stuff.
    1085  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1086  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    1088  */
    1089 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    1090 {
    1091     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    1092     AssertMsgReturn(   (bUnmapInfo & 0x08)
    1093                     && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    1094                     &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    1095                        == ((unsigned)bUnmapInfo >> 4),
    1096                     ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
    1097                     VERR_NOT_FOUND);
    1098 
    1099     /* If it's bounce buffered, we may need to write back the buffer. */
    1100     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    1101     {
    1102         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    1103             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
    1104     }
    1105     /* Otherwise unlock it. */
    1106     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    1107         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1108 
    1109     /* Free the entry. */
    1110     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1111     Assert(pVCpu->iem.s.cActiveMappings != 0);
    1112     pVCpu->iem.s.cActiveMappings--;
    1113     return VINF_SUCCESS;
    1114 }
    1115 #endif
    1116 
    1117 
    1118 /**
    1119  * Rolls back mappings, releasing page locks and such.
    1120  *
    1121  * The caller shall only call this after checking cActiveMappings.
    1122  *
    1123  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    1124  */
    1125 void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
    1126 {
    1127     Assert(pVCpu->iem.s.cActiveMappings > 0);
    1128 
    1129     uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    1130     while (iMemMap-- > 0)
    1131     {
    1132         uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
    1133         if (fAccess != IEM_ACCESS_INVALID)
    1134         {
    1135             AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
    1136             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1137             if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
    1138                 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1139             AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
    1140                       ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
    1141                        iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
    1142                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
    1143             pVCpu->iem.s.cActiveMappings--;
    1144         }
    1145     }
    1146 }
    1147 
    1148 #undef  LOG_GROUP
    1149 #define LOG_GROUP LOG_GROUP_IEM
    1150 
    1151 /** @} */
    1152401
    1153402
     
    20801329}
    20811330
    2082 #ifdef IN_RING3
    2083 
    2084 /**
    2085  * Handles the unlikely and probably fatal merge cases.
    2086  *
    2087  * @returns Merged status code.
    2088  * @param   rcStrict        Current EM status code.
    2089  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    2090  *                          with @a rcStrict.
    2091  * @param   iMemMap         The memory mapping index. For error reporting only.
    2092  * @param   pVCpu           The cross context virtual CPU structure of the calling
    2093  *                          thread, for error reporting only.
    2094  */
    2095 DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
    2096                                                           unsigned iMemMap, PVMCPUCC pVCpu)
    2097 {
    2098     if (RT_FAILURE_NP(rcStrict))
    2099         return rcStrict;
    2100 
    2101     if (RT_FAILURE_NP(rcStrictCommit))
    2102         return rcStrictCommit;
    2103 
    2104     if (rcStrict == rcStrictCommit)
    2105         return rcStrictCommit;
    2106 
    2107     AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
    2108                            VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
    2109                            pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
    2110                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
    2111                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    2112     return VERR_IOM_FF_STATUS_IPE;
    2113 }
    2114 
    2115 
    2116 /**
    2117  * Helper for IOMR3ProcessForceFlag.
    2118  *
    2119  * @returns Merged status code.
    2120  * @param   rcStrict        Current EM status code.
    2121  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    2122  *                          with @a rcStrict.
    2123  * @param   iMemMap         The memory mapping index. For error reporting only.
    2124  * @param   pVCpu           The cross context virtual CPU structure of the calling
    2125  *                          thread, for error reporting only.
    2126  */
    2127 DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
    2128 {
    2129     /* Simple. */
    2130     if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
    2131         return rcStrictCommit;
    2132 
    2133     if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
    2134         return rcStrict;
    2135 
    2136     /* EM scheduling status codes. */
    2137     if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
    2138                   && rcStrict <= VINF_EM_LAST))
    2139     {
    2140         if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
    2141                       && rcStrictCommit <= VINF_EM_LAST))
    2142             return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    2143     }
    2144 
    2145     /* Unlikely */
    2146     return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
    2147 }
    2148 
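A tiny illustration, not VirtualBox code, of the rule iemR3MergeStatus applies above when both inputs are EM scheduling statuses: the numerically lower code is kept. The values used here are placeholders rather than the real VINF_EM_* constants:

    #include <stdio.h>

    /* Placeholder scheduling codes; the merge keeps the numerically lower one. */
    static int mergeEmSchedulingStatus(int rcStrict, int rcStrictCommit)
    {
        return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    int main(void)
    {
        printf("%d\n", mergeEmSchedulingStatus(1101, 1115)); /* prints 1101 */
        return 0;
    }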
    2149 
    2150 /**
    2151  * Called by force-flag handling code when VMCPU_FF_IEM is set.
    2152  *
    2153  * @returns Merge between @a rcStrict and what the commit operation returned.
    2154  * @param   pVM         The cross context VM structure.
    2155  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    2156  * @param   rcStrict    The status code returned by ring-0 or raw-mode.
    2157  */
    2158 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
    2159 {
    2160     /*
    2161      * Reset the pending commit.
    2162      */
    2163     AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
    2164               & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
    2165               ("%#x %#x %#x\n",
    2166                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    2167     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
    2168 
    2169     /*
    2170      * Commit the pending bounce buffers (usually just one).
    2171      */
    2172     unsigned cBufs = 0;
    2173     unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    2174     while (iMemMap-- > 0)
    2175         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
    2176         {
    2177             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    2178             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    2179             Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
    2180 
    2181             uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    2182             uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    2183             uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    2184 
    2185             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
    2186             {
    2187                 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
    2188                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    2189                                                             pbBuf,
    2190                                                             cbFirst,
    2191                                                             PGMACCESSORIGIN_IEM);
    2192                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
    2193                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
    2194                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2195                      VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
    2196             }
    2197 
    2198             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
    2199             {
    2200                 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
    2201                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    2202                                                             pbBuf + cbFirst,
    2203                                                             cbSecond,
    2204                                                             PGMACCESSORIGIN_IEM);
    2205                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
    2206                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
    2207                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
    2208                      VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
    2209             }
    2210             cBufs++;
    2211             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2212         }
    2213 
    2214     AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
    2215               ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
    2216                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    2217     pVCpu->iem.s.cActiveMappings = 0;
    2218     return rcStrict;
    2219 }
    2220 
    2221 #endif /* IN_RING3 */
    2222 
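To make the ring-0/ring-3 hand-off above easier to follow, here is a toy model (not VirtualBox code; all names are invented) of the postponed-write scheme: the non-ring-3 commit path records which bounce-buffer halves still need writing and raises a force flag, and the ring-3 handler later performs those writes and clears the state, much like IEMR3ProcessForceFlag does above:

    #include <stdbool.h>
    #include <stdio.h>

    enum { PENDING_1ST = 1, PENDING_2ND = 2 };   /* stand-ins for IEM_ACCESS_PENDING_R3_WRITE_* */

    static unsigned g_fPending;    /* which halves of the bounce buffer still need committing */
    static bool     g_fForceFlag;  /* stand-in for VMCPU_FF_IEM                                */

    /* Ring-0 side: the write cannot be completed here, so postpone it and raise the flag. */
    static void postponeWrite(unsigned fParts)
    {
        g_fPending |= fParts;
        g_fForceFlag = true;
    }

    /* Ring-3 side: commit whatever was postponed and clear the bookkeeping. */
    static void processForceFlag(void)
    {
        if (g_fPending & PENDING_1ST) printf("committing first part\n");
        if (g_fPending & PENDING_2ND) printf("committing second part\n");
        g_fPending   = 0;
        g_fForceFlag = false;
    }

    int main(void)
    {
        postponeWrite(PENDING_1ST | PENDING_2ND);
        if (g_fForceFlag)
            processForceFlag();
        return 0;
    }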
  • trunk/src/VBox/VMM/VMMAll/IEMAllMem.cpp

    r108247 r108248  
    11/* $Id$ */
    22/** @file
    3  * IEM - Interpreted Execution Manager - All Contexts.
     3 * IEM - Interpreted Execution Manager - Common Memory Routines.
    44 */
    55
     
    2727
    2828
    29 /** @page pg_iem    IEM - Interpreted Execution Manager
    30  *
    31  * The interpreted execution manager (IEM) is for executing short guest code
    32  * sequences that are causing too many exits / virtualization traps.  It will
    33  * also be used to interpret single instructions, thus replacing the selective
    34  * interpreters in EM and IOM.
    35  *
    36  * Design goals:
    37  *      - Relatively small footprint, although we favour speed and correctness
    38  *        over size.
    39  *      - Reasonably fast.
    40  *      - Correctly handle lock prefixed instructions.
    41  *      - Complete instruction set - eventually.
    42  *      - Refactorable into a recompiler, maybe.
    43  *      - Replace EMInterpret*.
    44  *
    45  * Using the existing disassembler has been considered; however, this is thought
    46  * to conflict with speed as the disassembler chews things a bit too much while
    47  * leaving us with a somewhat complicated state to interpret afterwards.
    48  *
    49  *
    50  * The current code is very much work in progress. You've been warned!
    51  *
    52  *
    53  * @section sec_iem_fpu_instr   FPU Instructions
    54  *
    55  * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
    56  * same or equivalent instructions on the host FPU.  To make life easy, we also
    57  * let the FPU prioritize the unmasked exceptions for us.  This however, only
    58  * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
    59  * for FPU exception delivery, because with CR0.NE=0 there is a window where we
    60  * can trigger spurious FPU exceptions.
    61  *
    62  * The guest FPU state is not loaded into the host CPU and kept there till we
    63  * leave IEM because the calling conventions have declared an all year open
    64  * season on much of the FPU state.  For instance an innocent looking call to
    65  * memcpy might end up using a whole bunch of XMM or MM registers if the
    66  * particular implementation finds it worthwhile.
    67  *
    68  *
    69  * @section sec_iem_logging     Logging
    70  *
    71  * The IEM code uses the \"IEM\" log group for the main logging. The different
    72  * logging levels/flags are generally used for the following purposes:
    73  *      - Level 1  (Log)  : Errors, exceptions, interrupts and such major events.
    74  *      - Flow  (LogFlow) : Basic enter/exit IEM state info.
    75  *      - Level 2  (Log2) : ?
    76  *      - Level 3  (Log3) : More detailed enter/exit IEM state info.
    77  *      - Level 4  (Log4) : Decoding mnemonics w/ EIP.
    78  *      - Level 5  (Log5) : Decoding details.
    79  *      - Level 6  (Log6) : Enables/disables the lockstep comparison with REM.
    80  *      - Level 7  (Log7) : iret++ execution logging.
    81  *      - Level 8  (Log8) :
    82  *      - Level 9  (Log9) :
    83  *      - Level 10 (Log10): TLBs.
    84  *      - Level 11 (Log11): Unmasked FPU exceptions.
    85  *
    86  * The \"IEM_MEM\" log group covers most of memory related details logging,
    87  * except for errors and exceptions:
    88  *      - Level 1  (Log)  : Reads.
    89  *      - Level 2  (Log2) : Read fallbacks.
    90  *      - Level 3  (Log3) : MemMap read.
    91  *      - Level 4  (Log4) : MemMap read fallbacks.
    92  *      - Level 5  (Log5) : Writes
    93  *      - Level 6  (Log6) : Write fallbacks.
    94  *      - Level 7  (Log7) : MemMap writes and read-writes.
    95  *      - Level 8  (Log8) : MemMap write and read-write fallbacks.
    96  *      - Level 9  (Log9) : Stack reads.
    97  *      - Level 10 (Log10): Stack read fallbacks.
    98  *      - Level 11 (Log11): Stack writes.
    99  *      - Level 12 (Log12): Stack write fallbacks.
    100  *      - Flow  (LogFlow) :
    101  *
    102  * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
    103  *      - Level 1  (Log)  : Errors and other major events.
    104  *      - Flow (LogFlow)  : Misc flow stuff (cleanup?)
    105  *      - Level 2  (Log2) : VM exits.
    106  *
    107  * The syscall logging level assignments:
    108  *      - Level 1: DOS and BIOS.
    109  *      - Level 2: Windows 3.x
    110  *      - Level 3: Linux.
    111  */
    112 
    113 /* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
    114 #ifdef _MSC_VER
    115 # pragma warning(disable:4505)
    116 #endif
    117 
    118 
    11929/*********************************************************************************************************************************
    12030*   Header Files                                                                                                                 *
    12131*********************************************************************************************************************************/
    122 #define LOG_GROUP   LOG_GROUP_IEM
     32#define LOG_GROUP   LOG_GROUP_IEM_MEM
    12333#define VMCPU_INCL_CPUM_GST_CTX
    12434#ifdef IN_RING0
     
    12737#include <VBox/vmm/iem.h>
    12838#include <VBox/vmm/cpum.h>
    129 #include <VBox/vmm/pdmapic.h>
    130 #include <VBox/vmm/pdm.h>
    13139#include <VBox/vmm/pgm.h>
    132 #include <VBox/vmm/iom.h>
    133 #include <VBox/vmm/em.h>
    134 #include <VBox/vmm/hm.h>
    135 #include <VBox/vmm/nem.h>
    136 #include <VBox/vmm/gcm.h>
    137 #include <VBox/vmm/gim.h>
    138 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    139 # include <VBox/vmm/em.h>
    140 # include <VBox/vmm/hm_svm.h>
    141 #endif
    142 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    143 # include <VBox/vmm/hmvmxinline.h>
    144 #endif
    145 #include <VBox/vmm/tm.h>
    14640#include <VBox/vmm/dbgf.h>
    147 #include <VBox/vmm/dbgftrace.h>
    14841#include "IEMInternal.h"
    14942#include <VBox/vmm/vmcc.h>
     
    15144#include <VBox/err.h>
    15245#include <VBox/param.h>
    153 #include <VBox/dis.h>
    154 #include <iprt/asm-math.h>
    155 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    156 # include <iprt/asm-amd64-x86.h>
    157 #elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
    158 # include <iprt/asm-arm.h>
    159 #endif
    16046#include <iprt/assert.h>
    16147#include <iprt/string.h>
     
    17965
    18066
    181 /**
    182  * Initializes the decoder state.
    183  *
    184  * iemReInitDecoder is mostly a copy of this function.
    185  *
    186  * @param   pVCpu               The cross context virtual CPU structure of the
    187  *                              calling thread.
    188  * @param   fExecOpts           Optional execution flags:
    189  *                                  - IEM_F_BYPASS_HANDLERS
    190  *                                  - IEM_F_X86_DISREGARD_LOCK
    191  */
    192 DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
    193 {
    194     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    195     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    196     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    197     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    198     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    199     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    200     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    201     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    202     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    203     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    204 
    205     /* Execution state: */
    206     uint32_t fExec;
    207     pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
    208 
    209     /* Decoder state: */
    210     pVCpu->iem.s.enmDefAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    211     pVCpu->iem.s.enmEffAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;
    212     if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
    213     {
    214         pVCpu->iem.s.enmDefOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    215         pVCpu->iem.s.enmEffOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;
    216     }
    217     else
    218     {
    219         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    220         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    221     }
    222     pVCpu->iem.s.fPrefixes          = 0;
    223     pVCpu->iem.s.uRexReg            = 0;
    224     pVCpu->iem.s.uRexB              = 0;
    225     pVCpu->iem.s.uRexIndex          = 0;
    226     pVCpu->iem.s.idxPrefix          = 0;
    227     pVCpu->iem.s.uVex3rdReg         = 0;
    228     pVCpu->iem.s.uVexLength         = 0;
    229     pVCpu->iem.s.fEvexStuff         = 0;
    230     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
    231 #ifdef IEM_WITH_CODE_TLB
    232     pVCpu->iem.s.pbInstrBuf         = NULL;
    233     pVCpu->iem.s.offInstrNextByte   = 0;
    234     pVCpu->iem.s.offCurInstrStart   = 0;
    235 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    236     pVCpu->iem.s.offOpcode          = 0;
    237 # endif
    238 # ifdef VBOX_STRICT
    239     pVCpu->iem.s.GCPhysInstrBuf     = NIL_RTGCPHYS;
    240     pVCpu->iem.s.cbInstrBuf         = UINT16_MAX;
    241     pVCpu->iem.s.cbInstrBufTotal    = UINT16_MAX;
    242     pVCpu->iem.s.uInstrBufPc        = UINT64_C(0xc0ffc0ffcff0c0ff);
    243 # endif
    244 #else
    245     pVCpu->iem.s.offOpcode          = 0;
    246     pVCpu->iem.s.cbOpcode           = 0;
    247 #endif
    248     pVCpu->iem.s.offModRm           = 0;
    249     pVCpu->iem.s.cActiveMappings    = 0;
    250     pVCpu->iem.s.iNextMapping       = 0;
    251     pVCpu->iem.s.rcPassUp           = VINF_SUCCESS;
    252 
    253 #ifdef DBGFTRACE_ENABLED
    254     switch (IEM_GET_CPU_MODE(pVCpu))
    255     {
    256         case IEMMODE_64BIT:
    257             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
    258             break;
    259         case IEMMODE_32BIT:
    260             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    261             break;
    262         case IEMMODE_16BIT:
    263             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    264             break;
    265     }
    266 #endif
    267 }
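
/* A minimal usage sketch (not part of the changeset): the fExecOpts parameter is
   simply OR'ed into fExec above, so a hypothetical internal caller that wants
   access handlers bypassed needs nothing more than this before fetching the
   first opcode byte.  pVCpu is assumed valid and owned by the calling EMT. */
#if 0 /* illustrative only */
    iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
#endif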
    268 
    269 
    270 /**
     271  * Reinitializes the decoder state for the 2nd+ loops of IEMExecLots.
    272  *
    273  * This is mostly a copy of iemInitDecoder.
    274  *
    275  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    276  */
    277 DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
    278 {
    279     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    280     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    281     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    282     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    283     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    284     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    285     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    286     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    287     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    288 
    289     /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
    290     AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
    291               ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
    292 
    293     IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
    294     pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
    295     pVCpu->iem.s.enmEffAddrMode     = enmMode;
    296     if (enmMode != IEMMODE_64BIT)
    297     {
    298         pVCpu->iem.s.enmDefOpSize   = enmMode;  /** @todo check if this is correct... */
    299         pVCpu->iem.s.enmEffOpSize   = enmMode;
    300     }
    301     else
    302     {
    303         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    304         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    305     }
    306     pVCpu->iem.s.fPrefixes          = 0;
    307     pVCpu->iem.s.uRexReg            = 0;
    308     pVCpu->iem.s.uRexB              = 0;
    309     pVCpu->iem.s.uRexIndex          = 0;
    310     pVCpu->iem.s.idxPrefix          = 0;
    311     pVCpu->iem.s.uVex3rdReg         = 0;
    312     pVCpu->iem.s.uVexLength         = 0;
    313     pVCpu->iem.s.fEvexStuff         = 0;
    314     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
    315 #ifdef IEM_WITH_CODE_TLB
    316     if (pVCpu->iem.s.pbInstrBuf)
    317     {
    318         uint64_t off = (enmMode == IEMMODE_64BIT
    319                         ? pVCpu->cpum.GstCtx.rip
    320                         : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
    321                      - pVCpu->iem.s.uInstrBufPc;
    322         if (off < pVCpu->iem.s.cbInstrBufTotal)
    323         {
    324             pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
    325             pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
    326             if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
    327                 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
    328             else
    329                 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
    330         }
    331         else
    332         {
    333             pVCpu->iem.s.pbInstrBuf       = NULL;
    334             pVCpu->iem.s.offInstrNextByte = 0;
    335             pVCpu->iem.s.offCurInstrStart = 0;
    336             pVCpu->iem.s.cbInstrBuf       = 0;
    337             pVCpu->iem.s.cbInstrBufTotal  = 0;
    338             pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    339         }
    340     }
    341     else
    342     {
    343         pVCpu->iem.s.offInstrNextByte = 0;
    344         pVCpu->iem.s.offCurInstrStart = 0;
    345         pVCpu->iem.s.cbInstrBuf       = 0;
    346         pVCpu->iem.s.cbInstrBufTotal  = 0;
    347 # ifdef VBOX_STRICT
    348         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    349 # endif
    350     }
    351 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    352     pVCpu->iem.s.offOpcode          = 0;
    353 # endif
    354 #else  /* !IEM_WITH_CODE_TLB */
    355     pVCpu->iem.s.cbOpcode           = 0;
    356     pVCpu->iem.s.offOpcode          = 0;
    357 #endif /* !IEM_WITH_CODE_TLB */
    358     pVCpu->iem.s.offModRm           = 0;
    359     Assert(pVCpu->iem.s.cActiveMappings == 0);
    360     pVCpu->iem.s.iNextMapping       = 0;
    361     Assert(pVCpu->iem.s.rcPassUp   == VINF_SUCCESS);
    362     Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
    363 
    364 #ifdef DBGFTRACE_ENABLED
    365     switch (enmMode)
    366     {
    367         case IEMMODE_64BIT:
    368             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
    369             break;
    370         case IEMMODE_32BIT:
    371             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    372             break;
    373         case IEMMODE_16BIT:
    374             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    375             break;
    376     }
    377 #endif
    378 }
    379 
    380 
    381 /**
     382  * Prefetches opcodes the first time execution starts.
    383  *
    384  * @returns Strict VBox status code.
    385  * @param   pVCpu               The cross context virtual CPU structure of the
    386  *                              calling thread.
    387  * @param   fExecOpts           Optional execution flags:
    388  *                                  - IEM_F_BYPASS_HANDLERS
    389  *                                  - IEM_F_X86_DISREGARD_LOCK
    390  */
    391 DECLINLINE(VBOXSTRICTRC) iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
    392 {
    393     iemInitDecoder(pVCpu, fExecOpts);
    394 
    395 #ifndef IEM_WITH_CODE_TLB
    396     return iemOpcodeFetchPrefetch(pVCpu);
    397 #else
    398     return VINF_SUCCESS;
    399 #endif
    400 }
    401 
    402 
    40367/** @name   Memory access.
    40468 *
    40569 * @{
    40670 */
    407 
    408 #undef  LOG_GROUP
    409 #define LOG_GROUP LOG_GROUP_IEM_MEM
    41071
    41172#if 0 /*unused*/
     
    1152813
    1153814
    1154 #ifdef LOG_ENABLED
    1155 /**
    1156  * Logs the current instruction.
    1157  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    1158  * @param   fSameCtx    Set if we have the same context information as the VMM,
    1159  *                      clear if we may have already executed an instruction in
    1160  *                      our debug context. When clear, we assume IEMCPU holds
    1161  *                      valid CPU mode info.
    1162  *
    1163  *                      The @a fSameCtx parameter is now misleading and obsolete.
    1164  * @param   pszFunction The IEM function doing the execution.
    1165  */
    1166 static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
    1167 {
    1168 # ifdef IN_RING3
    1169     if (LogIs2Enabled())
    1170     {
    1171         char     szInstr[256];
    1172         uint32_t cbInstr = 0;
    1173         if (fSameCtx)
    1174             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
    1175                                DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
    1176                                szInstr, sizeof(szInstr), &cbInstr);
    1177         else
    1178         {
    1179             uint32_t fFlags = 0;
    1180             switch (IEM_GET_CPU_MODE(pVCpu))
    1181             {
    1182                 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
    1183                 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
    1184                 case IEMMODE_16BIT:
    1185                     if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
    1186                         fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
    1187                     else
    1188                         fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
    1189                     break;
    1190             }
    1191             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
    1192                                szInstr, sizeof(szInstr), &cbInstr);
    1193         }
    1194 
    1195         PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    1196         Log2(("**** %s fExec=%x\n"
    1197               " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
    1198               " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
    1199               " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
    1200               " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
    1201               " %s\n"
    1202               , pszFunction, pVCpu->iem.s.fExec,
    1203               pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
    1204               pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
    1205               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
    1206               pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
    1207               pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
    1208               szInstr));
    1209 
    1210         /* This stuff sucks atm. as it fills the log with MSRs. */
    1211         //if (LogIs3Enabled())
    1212         //    DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
    1213     }
    1214     else
    1215 # endif
    1216         LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
    1217                  pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
    1218     RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
    1219 }
    1220 #endif /* LOG_ENABLED */
    1221 
    1222 
    1223 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1224 /**
    1225  * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
    1226  * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
    1227  *
    1228  * @returns Modified rcStrict.
    1229  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    1230  * @param   rcStrict    The instruction execution status.
    1231  */
    1232 static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
    1233 {
    1234     Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
    1235     if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
    1236     {
    1237         /* VMX preemption timer takes priority over NMI-window exits. */
    1238         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
    1239         {
    1240             rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
    1241             Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
    1242         }
    1243         /*
    1244          * Check remaining intercepts.
    1245          *
    1246          * NMI-window and Interrupt-window VM-exits.
    1247          * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
    1248          * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
    1249          *
    1250          * See Intel spec. 26.7.6 "NMI-Window Exiting".
    1251          * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
    1252          */
    1253         else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
    1254                  && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    1255                  && !TRPMHasTrap(pVCpu))
    1256         {
    1257             Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
    1258             if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
    1259                 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
    1260             {
    1261                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
    1262                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
    1263             }
    1264             else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
    1265                      && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
    1266             {
    1267                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
    1268                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
    1269             }
    1270         }
    1271     }
    1272     /* TPR-below threshold/APIC write has the highest priority. */
    1273     else  if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
    1274     {
    1275         rcStrict = iemVmxApicWriteEmulation(pVCpu);
    1276         Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    1277         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
    1278     }
    1279     /* MTF takes priority over VMX-preemption timer. */
    1280     else
    1281     {
    1282         rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
    1283         Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    1284         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
    1285     }
    1286     return rcStrict;
    1287 }
    1288 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
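
/* Simplified restatement of the priority order implemented above (sketch only;
   the real code additionally checks the interrupt shadow, pending TRPM traps,
   virtual-NMI blocking and virtual-interrupt enablement before taking the
   window exits): */
#if 0 /* illustrative only */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))            /* highest priority */
        rcStrict = iemVmxApicWriteEmulation(pVCpu);
    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))              /* monitor trap flag */
        rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))    /* preemption timer */
        rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW))       /* NMI window */
        rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW))       /* interrupt window */
        rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
#endif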
    1289 
    1290 
    1291 /**
    1292  * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,
    1293  * IEMExecOneBypass and friends.
    1294  *
    1295  * Similar code is found in IEMExecLots.
    1296  *
    1297  * @return  Strict VBox status code.
    1298  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    1299  * @param   fExecuteInhibit     If set, execute the instruction following CLI,
    1300  *                      POP SS and MOV SS,GR.
    1301  * @param   pszFunction The calling function name.
    1302  */
    1303 DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
    1304 {
    1305     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    1306     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    1307     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    1308     RT_NOREF_PV(pszFunction);
    1309 
    1310 #ifdef IEM_WITH_SETJMP
    1311     VBOXSTRICTRC rcStrict;
    1312     IEM_TRY_SETJMP(pVCpu, rcStrict)
    1313     {
    1314         uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1315         rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1316     }
    1317     IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    1318     {
    1319         pVCpu->iem.s.cLongJumps++;
    1320     }
    1321     IEM_CATCH_LONGJMP_END(pVCpu);
    1322 #else
    1323     uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1324     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1325 #endif
    1326     if (rcStrict == VINF_SUCCESS)
    1327         pVCpu->iem.s.cInstructions++;
    1328     if (pVCpu->iem.s.cActiveMappings > 0)
    1329     {
    1330         Assert(rcStrict != VINF_SUCCESS);
    1331         iemMemRollback(pVCpu);
    1332     }
    1333     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    1334     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    1335     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    1336 
    1337 //#ifdef DEBUG
    1338 //    AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
    1339 //#endif
    1340 
    1341 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1342     /*
    1343      * Perform any VMX nested-guest instruction boundary actions.
    1344      *
    1345      * If any of these causes a VM-exit, we must skip executing the next
    1346      * instruction (would run into stale page tables). A VM-exit makes sure
    1347      * there is no interrupt-inhibition, so that should ensure we don't go
    1348      * to try execute the next instruction. Clearing fExecuteInhibit is
    1349      * problematic because of the setjmp/longjmp clobbering above.
    1350      */
    1351     if (   !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    1352                                      | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
    1353         || rcStrict != VINF_SUCCESS)
    1354     { /* likely */ }
    1355     else
    1356         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    1357 #endif
    1358 
    1359     /* Execute the next instruction as well if a cli, pop ss or
    1360        mov ss, Gr has just completed successfully. */
    1361     if (   fExecuteInhibit
    1362         && rcStrict == VINF_SUCCESS
    1363         && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    1364     {
    1365         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
    1366         if (rcStrict == VINF_SUCCESS)
    1367         {
    1368 #ifdef LOG_ENABLED
    1369             iemLogCurInstr(pVCpu, false, pszFunction);
    1370 #endif
    1371 #ifdef IEM_WITH_SETJMP
    1372             IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
    1373             {
    1374                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1375                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1376             }
    1377             IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    1378             {
    1379                 pVCpu->iem.s.cLongJumps++;
    1380             }
    1381             IEM_CATCH_LONGJMP_END(pVCpu);
    1382 #else
    1383             IEM_OPCODE_GET_FIRST_U8(&b);
    1384             rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1385 #endif
    1386             if (rcStrict == VINF_SUCCESS)
    1387             {
    1388                 pVCpu->iem.s.cInstructions++;
    1389 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1390                 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    1391                                               | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
    1392                 { /* likely */ }
    1393                 else
    1394                     rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    1395 #endif
    1396             }
    1397             if (pVCpu->iem.s.cActiveMappings > 0)
    1398             {
    1399                 Assert(rcStrict != VINF_SUCCESS);
    1400                 iemMemRollback(pVCpu);
    1401             }
    1402             AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    1403             AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    1404             AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    1405         }
    1406         else if (pVCpu->iem.s.cActiveMappings > 0)
    1407             iemMemRollback(pVCpu);
    1408         /** @todo drop this after we bake this change into RIP advancing. */
    1409         CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
    1410     }
    1411 
    1412     /*
    1413      * Return value fiddling, statistics and sanity assertions.
    1414      */
    1415     rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1416 
    1417     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    1418     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    1419     return rcStrict;
    1420 }
    1421 
    1422 
    1423 /**
    1424  * Execute one instruction.
    1425  *
    1426  * @return  Strict VBox status code.
    1427  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    1428  */
    1429 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
    1430 {
     1431     AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
    1432 #ifdef LOG_ENABLED
    1433     iemLogCurInstr(pVCpu, true, "IEMExecOne");
    1434 #endif
    1435 
    1436     /*
    1437      * Do the decoding and emulation.
    1438      */
    1439     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    1440     if (rcStrict == VINF_SUCCESS)
    1441         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
    1442     else if (pVCpu->iem.s.cActiveMappings > 0)
    1443         iemMemRollback(pVCpu);
    1444 
    1445     if (rcStrict != VINF_SUCCESS)
    1446         LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    1447                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    1448     return rcStrict;
    1449 }
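
/* Hypothetical caller sketch: an EM-style loop single-stepping the guest
   through IEM.  Only IEMExecOne() is real here; the step count and the
   emHandleStrictStatus() helper are made up for illustration. */
#if 0 /* illustrative only */
    for (unsigned cSteps = 0; cSteps < 32; cSteps++)
    {
        VBOXSTRICTRC rcStrictStep = IEMExecOne(pVCpu);
        if (rcStrictStep != VINF_SUCCESS)
            return emHandleStrictStatus(pVCpu, rcStrictStep);   /* hypothetical helper */
    }
#endif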
    1450 
    1451 
    1452 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    1453                                                         const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    1454 {
    1455     VBOXSTRICTRC rcStrict;
    1456     if (   cbOpcodeBytes
    1457         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    1458     {
    1459         iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
    1460 #ifdef IEM_WITH_CODE_TLB
    1461         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    1462         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    1463         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    1464         pVCpu->iem.s.offCurInstrStart = 0;
    1465         pVCpu->iem.s.offInstrNextByte = 0;
    1466         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    1467 #else
    1468         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    1469         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    1470 #endif
    1471         rcStrict = VINF_SUCCESS;
    1472     }
    1473     else
    1474         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    1475     if (rcStrict == VINF_SUCCESS)
    1476         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
    1477     else if (pVCpu->iem.s.cActiveMappings > 0)
    1478         iemMemRollback(pVCpu);
    1479 
    1480     return rcStrict;
    1481 }
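
/* Illustrative sketch (opcode bytes hypothetical): feeding IEM bytes the
   caller has already read at the current RIP, so the decoder can skip the
   guest memory fetch.  The bytes are only used when GstCtx.rip matches the
   given PC, as checked above. */
#if 0 /* illustrative only */
    static uint8_t const s_abUd2[] = { 0x0f, 0x0b };   /* example: UD2 */
    VBOXSTRICTRC rcStrict2 = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
                                                          s_abUd2, sizeof(s_abUd2));
#endif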
    1482 
    1483 
    1484 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)
    1485 {
    1486     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    1487     if (rcStrict == VINF_SUCCESS)
    1488         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypass");
    1489     else if (pVCpu->iem.s.cActiveMappings > 0)
    1490         iemMemRollback(pVCpu);
    1491 
    1492     return rcStrict;
    1493 }
    1494 
    1495 
    1496 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    1497                                                               const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    1498 {
    1499     VBOXSTRICTRC rcStrict;
    1500     if (   cbOpcodeBytes
    1501         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    1502     {
    1503         iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
    1504 #ifdef IEM_WITH_CODE_TLB
    1505         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    1506         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    1507         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    1508         pVCpu->iem.s.offCurInstrStart = 0;
    1509         pVCpu->iem.s.offInstrNextByte = 0;
    1510         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    1511 #else
    1512         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    1513         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    1514 #endif
    1515         rcStrict = VINF_SUCCESS;
    1516     }
    1517     else
    1518         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    1519     if (rcStrict == VINF_SUCCESS)
    1520         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
    1521     else if (pVCpu->iem.s.cActiveMappings > 0)
    1522         iemMemRollback(pVCpu);
    1523 
    1524     return rcStrict;
    1525 }
    1526 
    1527 
    1528 /**
    1529  * For handling split cacheline lock operations when the host has split-lock
    1530  * detection enabled.
    1531  *
    1532  * This will cause the interpreter to disregard the lock prefix and implicit
    1533  * locking (xchg).
    1534  *
    1535  * @returns Strict VBox status code.
    1536  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    1537  */
    1538 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
    1539 {
    1540     /*
    1541      * Do the decoding and emulation.
    1542      */
    1543     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
    1544     if (rcStrict == VINF_SUCCESS)
    1545         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
    1546     else if (pVCpu->iem.s.cActiveMappings > 0)
    1547         iemMemRollback(pVCpu);
    1548 
    1549     if (rcStrict != VINF_SUCCESS)
    1550         LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    1551                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    1552     return rcStrict;
    1553 }
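
/* Usage sketch (hypothetical call site): a handler reacting to a host
   split-lock #AC could fall back to this entry point to re-run the offending
   guest instruction with LOCK semantics relaxed, as described above. */
#if 0 /* illustrative only */
    VBOXSTRICTRC rcStrictLock = IEMExecOneIgnoreLock(pVCpu);
#endif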
    1554 
    1555 
    1556 /**
    1557  * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
    1558  * inject a pending TRPM trap.
    1559  */
    1560 VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
    1561 {
    1562     Assert(TRPMHasTrap(pVCpu));
    1563 
    1564     if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    1565         && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    1566     {
    1567         /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
    1568 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    1569         bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
    1570         if (fIntrEnabled)
    1571         {
    1572             if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
    1573                 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    1574             else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    1575                 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
    1576             else
    1577             {
    1578                 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
    1579                 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
    1580             }
    1581         }
    1582 #else
    1583         bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    1584 #endif
    1585         if (fIntrEnabled)
    1586         {
    1587             uint8_t     u8TrapNo;
    1588             TRPMEVENT   enmType;
    1589             uint32_t    uErrCode;
    1590             RTGCPTR     uCr2;
    1591             int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
    1592             AssertRC(rc2);
    1593             Assert(enmType == TRPM_HARDWARE_INT);
    1594             VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
    1595 
    1596             TRPMResetTrap(pVCpu);
    1597 
    1598 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    1599             /* Injecting an event may cause a VM-exit. */
    1600             if (   rcStrict != VINF_SUCCESS
    1601                 && rcStrict != VINF_IEM_RAISED_XCPT)
    1602                 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1603 #else
    1604             NOREF(rcStrict);
    1605 #endif
    1606         }
    1607     }
    1608 
    1609     return VINF_SUCCESS;
    1610 }
    1611 
    1612 
    1613 VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
    1614 {
    1615     uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
    1616     AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
    1617     Assert(cMaxInstructions > 0);
    1618 
    1619     /*
    1620      * See if there is an interrupt pending in TRPM, inject it if we can.
    1621      */
    1622     /** @todo What if we are injecting an exception and not an interrupt? Is that
    1623      *        possible here? For now we assert it is indeed only an interrupt. */
    1624     if (!TRPMHasTrap(pVCpu))
    1625     { /* likely */ }
    1626     else
    1627     {
    1628         VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
    1629         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1630         { /*likely */ }
    1631         else
    1632             return rcStrict;
    1633     }
    1634 
    1635     /*
    1636      * Initial decoder init w/ prefetch, then setup setjmp.
    1637      */
    1638     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    1639     if (rcStrict == VINF_SUCCESS)
    1640     {
    1641 #ifdef IEM_WITH_SETJMP
    1642         pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
    1643         IEM_TRY_SETJMP(pVCpu, rcStrict)
    1644 #endif
    1645         {
    1646             /*
     1647              * The run loop.  We limit ourselves to the caller-specified number of instructions.
    1648              */
    1649             uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
    1650             PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    1651             for (;;)
    1652             {
    1653                 /*
    1654                  * Log the state.
    1655                  */
    1656 #ifdef LOG_ENABLED
    1657                 iemLogCurInstr(pVCpu, true, "IEMExecLots");
    1658 #endif
    1659 
    1660                 /*
    1661                  * Do the decoding and emulation.
    1662                  */
    1663                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1664                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1665 #ifdef VBOX_STRICT
    1666                 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
    1667 #endif
    1668                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1669                 {
    1670                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    1671                     pVCpu->iem.s.cInstructions++;
    1672 
    1673 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1674                     /* Perform any VMX nested-guest instruction boundary actions. */
    1675                     uint64_t fCpu = pVCpu->fLocalForcedActions;
    1676                     if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    1677                                   | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
    1678                     { /* likely */ }
    1679                     else
    1680                     {
    1681                         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    1682                         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1683                             fCpu = pVCpu->fLocalForcedActions;
    1684                         else
    1685                         {
    1686                             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1687                             break;
    1688                         }
    1689                     }
    1690 #endif
    1691                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    1692                     {
    1693 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX
    1694                         uint64_t fCpu = pVCpu->fLocalForcedActions;
    1695 #endif
    1696                         fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    1697                                                       | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    1698                                                       | VMCPU_FF_TLB_FLUSH
    1699                                                       | VMCPU_FF_UNHALT );
    1700 
    1701                         if (RT_LIKELY(   (   !fCpu
    1702                                           || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    1703                                               && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
    1704                                       && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
    1705                         {
    1706                             if (--cMaxInstructionsGccStupidity > 0)
    1707                             {
     1708                 /* Poll timers every now and then according to the caller's specs. */
    1709                                 if (   (cMaxInstructionsGccStupidity & cPollRate) != 0
    1710                                     || !TMTimerPollBool(pVM, pVCpu))
    1711                                 {
    1712                                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    1713                                     iemReInitDecoder(pVCpu);
    1714                                     continue;
    1715                                 }
    1716                             }
    1717                         }
    1718                     }
    1719                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    1720                 }
    1721                 else if (pVCpu->iem.s.cActiveMappings > 0)
    1722                     iemMemRollback(pVCpu);
    1723                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1724                 break;
    1725             }
    1726         }
    1727 #ifdef IEM_WITH_SETJMP
    1728         IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    1729         {
    1730             if (pVCpu->iem.s.cActiveMappings > 0)
    1731                 iemMemRollback(pVCpu);
    1732 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    1733             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1734 # endif
    1735             pVCpu->iem.s.cLongJumps++;
    1736         }
    1737         IEM_CATCH_LONGJMP_END(pVCpu);
    1738 #endif
    1739 
    1740         /*
    1741          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    1742          */
    1743         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    1744         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    1745     }
    1746     else
    1747     {
    1748         if (pVCpu->iem.s.cActiveMappings > 0)
    1749             iemMemRollback(pVCpu);
    1750 
    1751 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    1752         /*
    1753          * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
    1754          * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
    1755          */
    1756         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1757 #endif
    1758     }
    1759 
    1760     /*
    1761      * Maybe re-enter raw-mode and log.
    1762      */
    1763     if (rcStrict != VINF_SUCCESS)
    1764         LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    1765                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    1766     if (pcInstructions)
    1767         *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
    1768     return rcStrict;
    1769 }
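
/* Illustrative caller sketch (instruction counts hypothetical).  Note the
   cPollRate contract asserted at the top of the function: cPollRate + 1 must
   be a power of two, so timers get polled roughly every cPollRate + 1
   instructions. */
#if 0 /* illustrative only */
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrictLots = IEMExecLots(pVCpu, 2048 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("Executed %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrictLots)));
#endif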
    1770 
    1771 
    1772 /**
    1773  * Interface used by EMExecuteExec, does exit statistics and limits.
    1774  *
    1775  * @returns Strict VBox status code.
    1776  * @param   pVCpu               The cross context virtual CPU structure.
    1777  * @param   fWillExit           To be defined.
    1778  * @param   cMinInstructions    Minimum number of instructions to execute before checking for FFs.
    1779  * @param   cMaxInstructions    Maximum number of instructions to execute.
    1780  * @param   cMaxInstructionsWithoutExits
    1781  *                              The max number of instructions without exits.
    1782  * @param   pStats              Where to return statistics.
    1783  */
    1784 VMM_INT_DECL(VBOXSTRICTRC)
    1785 IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
    1786                 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
    1787 {
    1788     NOREF(fWillExit); /** @todo define flexible exit crits */
    1789 
    1790     /*
    1791      * Initialize return stats.
    1792      */
    1793     pStats->cInstructions    = 0;
    1794     pStats->cExits           = 0;
    1795     pStats->cMaxExitDistance = 0;
    1796     pStats->cReserved        = 0;
    1797 
    1798     /*
    1799      * Initial decoder init w/ prefetch, then setup setjmp.
    1800      */
    1801     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    1802     if (rcStrict == VINF_SUCCESS)
    1803     {
    1804 #ifdef IEM_WITH_SETJMP
    1805         pVCpu->iem.s.cActiveMappings     = 0; /** @todo wtf?!? */
    1806         IEM_TRY_SETJMP(pVCpu, rcStrict)
    1807 #endif
    1808         {
    1809 #ifdef IN_RING0
    1810             bool const fCheckPreemptionPending   = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    1811 #endif
    1812             uint32_t   cInstructionSinceLastExit = 0;
    1813 
    1814             /*
     1815              * The run loop.  We limit ourselves to the caller-specified number of instructions.
    1816              */
    1817             PVM pVM = pVCpu->CTX_SUFF(pVM);
    1818             for (;;)
    1819             {
    1820                 /*
    1821                  * Log the state.
    1822                  */
    1823 #ifdef LOG_ENABLED
    1824                 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
    1825 #endif
    1826 
    1827                 /*
    1828                  * Do the decoding and emulation.
    1829                  */
    1830                 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
    1831 
    1832                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1833                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1834 
    1835                 if (   cPotentialExits != pVCpu->iem.s.cPotentialExits
    1836                     && cInstructionSinceLastExit > 0 /* don't count the first */ )
    1837                 {
    1838                     pStats->cExits += 1;
    1839                     if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
    1840                         pStats->cMaxExitDistance = cInstructionSinceLastExit;
    1841                     cInstructionSinceLastExit = 0;
    1842                 }
    1843 
    1844                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1845                 {
    1846                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    1847                     pVCpu->iem.s.cInstructions++;
    1848                     pStats->cInstructions++;
    1849                     cInstructionSinceLastExit++;
    1850 
    1851 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1852                     /* Perform any VMX nested-guest instruction boundary actions. */
    1853                     uint64_t fCpu = pVCpu->fLocalForcedActions;
    1854                     if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    1855                                   | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
    1856                     { /* likely */ }
    1857                     else
    1858                     {
    1859                         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    1860                         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1861                             fCpu = pVCpu->fLocalForcedActions;
    1862                         else
    1863                         {
    1864                             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1865                             break;
    1866                         }
    1867                     }
    1868 #endif
    1869                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    1870                     {
    1871 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX
    1872                         uint64_t fCpu = pVCpu->fLocalForcedActions;
    1873 #endif
    1874                         fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    1875                                                       | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    1876                                                       | VMCPU_FF_TLB_FLUSH
    1877                                                       | VMCPU_FF_UNHALT );
    1878                         if (RT_LIKELY(   (   (   !fCpu
    1879                                               || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    1880                                                   && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
    1881                                           && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
    1882                                       || pStats->cInstructions < cMinInstructions))
    1883                         {
    1884                             if (pStats->cInstructions < cMaxInstructions)
    1885                             {
    1886                                 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
    1887                                 {
    1888 #ifdef IN_RING0
    1889                                     if (   !fCheckPreemptionPending
    1890                                         || !RTThreadPreemptIsPending(NIL_RTTHREAD))
    1891 #endif
    1892                                     {
    1893                                         Assert(pVCpu->iem.s.cActiveMappings == 0);
    1894                                         iemReInitDecoder(pVCpu);
    1895                                         continue;
    1896                                     }
    1897 #ifdef IN_RING0
    1898                                     rcStrict = VINF_EM_RAW_INTERRUPT;
    1899                                     break;
    1900 #endif
    1901                                 }
    1902                             }
    1903                         }
    1904                         Assert(!(fCpu & VMCPU_FF_IEM));
    1905                     }
    1906                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    1907                 }
    1908                 else if (pVCpu->iem.s.cActiveMappings > 0)
     1909                     iemMemRollback(pVCpu);
    1910                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1911                 break;
    1912             }
    1913         }
    1914 #ifdef IEM_WITH_SETJMP
    1915         IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    1916         {
    1917             if (pVCpu->iem.s.cActiveMappings > 0)
    1918                 iemMemRollback(pVCpu);
    1919             pVCpu->iem.s.cLongJumps++;
    1920         }
    1921         IEM_CATCH_LONGJMP_END(pVCpu);
    1922 #endif
    1923 
    1924         /*
    1925          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    1926          */
    1927         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    1928         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    1929     }
    1930     else
    1931     {
    1932         if (pVCpu->iem.s.cActiveMappings > 0)
    1933             iemMemRollback(pVCpu);
    1934 
    1935 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    1936         /*
    1937          * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
    1938          * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
    1939          */
    1940         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1941 #endif
    1942     }
    1943 
    1944     /*
    1945      * Maybe re-enter raw-mode and log.
    1946      */
    1947     if (rcStrict != VINF_SUCCESS)
    1948         LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
    1949                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
    1950                  pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
    1951     return rcStrict;
    1952 }
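
/* Illustrative caller sketch (all counts hypothetical): run up to 1024
   instructions, bail out early once 32 instructions pass without a potential
   exit, then inspect the returned statistics. */
#if 0 /* illustrative only */
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrictExits = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 64 /*cMinInstructions*/,
                                                 1024 /*cMaxInstructions*/, 32 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("%u instructions, %u exits, max exit distance %u\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
#endif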
    1953 
    1954 
    1955 /**
    1956  * Injects a trap, fault, abort, software interrupt or external interrupt.
    1957  *
    1958  * The parameter list matches TRPMQueryTrapAll pretty closely.
    1959  *
    1960  * @returns Strict VBox status code.
    1961  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    1962  * @param   u8TrapNo            The trap number.
    1963  * @param   enmType             What type is it (trap/fault/abort), software
    1964  *                              interrupt or hardware interrupt.
    1965  * @param   uErrCode            The error code if applicable.
    1966  * @param   uCr2                The CR2 value if applicable.
    1967  * @param   cbInstr             The instruction length (only relevant for
    1968  *                              software interrupts).
    1969  * @note    x86 specific, but difficult to move due to iemInitDecoder dep.
    1970  */
    1971 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
    1972                                          uint8_t cbInstr)
    1973 {
    1974     iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
    1975 #ifdef DBGFTRACE_ENABLED
    1976     RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
    1977                       u8TrapNo, enmType, uErrCode, uCr2);
    1978 #endif
    1979 
    1980     uint32_t fFlags;
    1981     switch (enmType)
    1982     {
    1983         case TRPM_HARDWARE_INT:
    1984             Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
    1985             fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
    1986             uErrCode = uCr2 = 0;
    1987             break;
    1988 
    1989         case TRPM_SOFTWARE_INT:
    1990             Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
    1991             fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
    1992             uErrCode = uCr2 = 0;
    1993             break;
    1994 
    1995         case TRPM_TRAP:
    1996         case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
    1997             Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
    1998             fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
    1999             if (u8TrapNo == X86_XCPT_PF)
    2000                 fFlags |= IEM_XCPT_FLAGS_CR2;
    2001             switch (u8TrapNo)
    2002             {
    2003                 case X86_XCPT_DF:
    2004                 case X86_XCPT_TS:
    2005                 case X86_XCPT_NP:
    2006                 case X86_XCPT_SS:
    2007                 case X86_XCPT_PF:
    2008                 case X86_XCPT_AC:
    2009                 case X86_XCPT_GP:
    2010                     fFlags |= IEM_XCPT_FLAGS_ERR;
    2011                     break;
    2012             }
    2013             break;
    2014 
    2015         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    2016     }
    2017 
    2018     VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
    2019 
    2020     if (pVCpu->iem.s.cActiveMappings > 0)
    2021         iemMemRollback(pVCpu);
    2022 
    2023     return rcStrict;
    2024 }
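
/* Illustrative sketch (vector value hypothetical): injecting an external
   hardware interrupt, the same way iemExecInjectPendingTrap() above forwards
   a pending TRPM trap. */
#if 0 /* illustrative only */
    VBOXSTRICTRC rcStrictInj = IEMInjectTrap(pVCpu, 0x20 /*u8TrapNo*/, TRPM_HARDWARE_INT,
                                             0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
#endif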
    2025 
    2026 
    2027 /**
    2028  * Injects the active TRPM event.
    2029  *
    2030  * @returns Strict VBox status code.
    2031  * @param   pVCpu               The cross context virtual CPU structure.
    2032  */
    2033 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
    2034 {
    2035 #ifndef IEM_IMPLEMENTS_TASKSWITCH
    2036     IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
    2037 #else
    2038     uint8_t     u8TrapNo;
    2039     TRPMEVENT   enmType;
    2040     uint32_t    uErrCode;
    2041     RTGCUINTPTR uCr2;
    2042     uint8_t     cbInstr;
    2043     int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
    2044     if (RT_FAILURE(rc))
    2045         return rc;
    2046 
    2047     /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
    2048      *        ICEBP \#DB injection as a special case. */
    2049     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
    2050 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    2051     if (rcStrict == VINF_SVM_VMEXIT)
    2052         rcStrict = VINF_SUCCESS;
    2053 #endif
    2054 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2055     if (rcStrict == VINF_VMX_VMEXIT)
    2056         rcStrict = VINF_SUCCESS;
    2057 #endif
    2058     /** @todo Are there any other codes that imply the event was successfully
    2059      *        delivered to the guest? See @bugref{6607}.  */
    2060     if (   rcStrict == VINF_SUCCESS
    2061         || rcStrict == VINF_IEM_RAISED_XCPT)
    2062         TRPMResetTrap(pVCpu);
    2063 
    2064     return rcStrict;
    2065 #endif
    2066 }
    2067 
    2068 
    2069 VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
    2070 {
    2071     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    2072     return VERR_NOT_IMPLEMENTED;
    2073 }
    2074 
    2075 
    2076 VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
    2077 {
    2078     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    2079     return VERR_NOT_IMPLEMENTED;
    2080 }
    2081 
    2082815#ifdef IN_RING3
    2083816
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllXcpt-x86.cpp

    r108220 r108248  
    9090
    9191
    92 /*********************************************************************************************************************************
    93 *   Global Variables                                                                                                             *
    94 *********************************************************************************************************************************/
    95 #if defined(IEM_LOG_MEMORY_WRITES)
    96 /** What IEM just wrote. */
    97 uint8_t g_abIemWrote[256];
    98 /** How much IEM just wrote. */
    99 size_t g_cbIemWrote;
    100 #endif
    101 
    10292
    10393/** @name  Misc Worker Functions.