VirtualBox changeset r108248

Timestamp: Feb 17, 2025 12:34:56 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 167569
Message: VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531
File: trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (1 edited, r108247 to r108248)

#endif
}


/** @name   Memory access.
 *
 * @{
 */

#undef  LOG_GROUP
#define LOG_GROUP LOG_GROUP_IEM_MEM

#if 0 /*unused*/
/**
 * Looks up a memory mapping entry.
 *
 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   pvMem           The memory address.
 * @param   fAccess         The access type to match.
 */
DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
{
    Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
    fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
    if (   pVCpu->iem.s.aMemMappings[0].pv == pvMem
        && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
        return 0;
    if (   pVCpu->iem.s.aMemMappings[1].pv == pvMem
        && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
        return 1;
    if (   pVCpu->iem.s.aMemMappings[2].pv == pvMem
        && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
        return 2;
    return VERR_NOT_FOUND;
}
#endif

/**
 * Finds a free memmap entry when using iNextMapping doesn't work.
 *
 * @returns Memory mapping index, 1024 on failure.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 */
static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
{
    /*
     * The easy case.
     */
    if (pVCpu->iem.s.cActiveMappings == 0)
    {
        pVCpu->iem.s.iNextMapping = 1;
        return 0;
    }

    /* There should be enough mappings for all instructions. */
    AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);

    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
        if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
            return i;

    AssertFailedReturn(1024);
}

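/*
 * Illustrative sketch (not part of the changeset): the allocation pattern
 * above, reduced to plain C -- try an iNextMapping-style hint first, then
 * fall back to a linear scan of the fixed-size table.  All names and the
 * MAPTABLE type here are hypothetical stand-ins for the IEM structures.
 */
#if 0 /* example only */
#include <stdint.h>

#define MAP_SLOTS       3
#define ACCESS_INVALID  UINT32_MAX          /* stand-in for IEM_ACCESS_INVALID */

typedef struct MAPTABLE
{
    uint32_t afAccess[MAP_SLOTS];           /* ACCESS_INVALID when the slot is free */
    uint32_t iNext;                         /* allocation hint, like iNextMapping */
} MAPTABLE;

static int mapAllocSlot(MAPTABLE *pTab)
{
    unsigned i = pTab->iNext;
    if (i < MAP_SLOTS && pTab->afAccess[i] == ACCESS_INVALID)
        return (int)i;                      /* the hint was good */
    for (i = 0; i < MAP_SLOTS; i++)
        if (pTab->afAccess[i] == ACCESS_INVALID)
            return (int)i;                  /* scan fallback, like iemMemMapFindFree */
    return -1;                              /* table full (cf. the 1024 sentinel above) */
}
#endif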

/**
 * Commits a bounce buffer that needs writing back and unmaps it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   iMemMap         The index of the buffer to commit.
 * @param   fPostponeFail   Whether we can postpone writer failures to ring-3.
 *                          Always false in ring-3, obviously.
 */
static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
{
    Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
#ifdef IN_RING3
    Assert(!fPostponeFail);
    RT_NOREF_PV(fPostponeFail);
#endif

    /*
     * Do the writing.
     */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
    {
        uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
        uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
        uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
        if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
        {
            /*
             * Carefully and efficiently dealing with access handler return
             * codes makes this a little bloated.
             */
            VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
                                                 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                 pbBuf,
                                                 cbFirst,
                                                 PGMACCESSORIGIN_IEM);
            if (rcStrict == VINF_SUCCESS)
            {
                if (cbSecond)
                {
                    rcStrict = PGMPhysWrite(pVM,
                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                            pbBuf + cbFirst,
                                            cbSecond,
                                            PGMACCESSORIGIN_IEM);
                    if (rcStrict == VINF_SUCCESS)
                    { /* nothing */ }
                    else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#ifndef IN_RING3
                    else if (fPostponeFail)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
                        VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                        return iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#endif
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        return rcStrict;
                    }
                }
            }
            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            {
                if (!cbSecond)
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                }
                else
                {
                    VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
                                                          pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                          pbBuf + cbFirst,
                                                          cbSecond,
                                                          PGMACCESSORIGIN_IEM);
                    if (rcStrict2 == VINF_SUCCESS)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
                    else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
                        PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#ifndef IN_RING3
                    else if (fPostponeFail)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
                        VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                        return iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#endif
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
                        return rcStrict2;
                    }
                }
            }
#ifndef IN_RING3
            else if (fPostponeFail)
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                if (!cbSecond)
                    pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
                else
                    pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
                VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                return iemSetPassUpStatus(pVCpu, rcStrict);
            }
#endif
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                return rcStrict;
            }
        }
        else
        {
            /*
             * No access handlers, much simpler.
             */
            int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
            if (RT_SUCCESS(rc))
            {
                if (cbSecond)
                {
                    rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
                    if (RT_SUCCESS(rc))
                    { /* likely */ }
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
                        return rc;
                    }
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                return rc;
            }
        }
    }

#if defined(IEM_LOG_MEMORY_WRITES)
    Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
          RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
    if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
        Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
              RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
              &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));

    size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    g_cbIemWrote = cbWrote;
    memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
#endif

    /*
     * Free the mapping entry.
     */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}

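/*
 * Illustrative sketch (not part of the changeset): the split write-back the
 * function above performs, reduced to its essence.  A buffered write spanning
 * two discontiguous physical ranges is committed in two parts, and the second
 * part is only attempted once the first has succeeded.  physWrite() is a
 * hypothetical stand-in for PGMPhysWrite.
 */
#if 0 /* example only */
#include <stdint.h>

/* Hypothetical stand-in for PGMPhysWrite(); returns 0 on success. */
extern int physWrite(uint64_t GCPhys, const uint8_t *pb, uint16_t cb);

static int commitBounceBuffer(uint64_t GCPhysFirst, uint64_t GCPhysSecond,
                              const uint8_t *pbBuf, uint16_t cbFirst, uint16_t cbSecond)
{
    int rc = physWrite(GCPhysFirst, pbBuf, cbFirst);
    if (rc != 0)
        return rc;                              /* first part failed */
    if (cbSecond)
        rc = physWrite(GCPhysSecond, pbBuf + cbFirst, cbSecond);
    return rc;                                  /* 0, or the second-part status */
}
#endif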

/**
 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
 * @todo duplicated
 */
DECL_FORCE_INLINE(uint32_t)
iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
{
    bool const  fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
}


/**
 * iemMemMap worker that deals with a request crossing pages.
 */
VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
                                            size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) RT_NOEXCEPT
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
    Assert(cbMem <= GUEST_PAGE_SIZE);

    /*
     * Do the address translations.
     */
    uint32_t const cbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    RTGCPHYS GCPhysFirst;
    VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));

    uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
    RTGCPHYS GCPhysSecond;
    rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
                                                 cbSecondPage, fAccess, &GCPhysSecond);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
    GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * Check for data breakpoints.
     */
    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
    { /* likely */ }
    else
    {
        uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
        fDataBps         |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
                                                      cbSecondPage, fAccess);
        pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
        if (fDataBps > 1)
            LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
                                  fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    }

    /*
     * Read in the current memory content if it's a read, execute or partial
     * write access.
     */
    uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

    if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    {
        if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
        {
            /*
             * Must carefully deal with access handler status codes here,
             * makes the code a bit bloated.
             */
            rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
            if (rcStrict == VINF_SUCCESS)
            {
                rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
                if (rcStrict == VINF_SUCCESS)
                { /* likely */ }
                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
                                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict;
                }
            }
            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            {
                VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
                if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
                {
                    PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                }
                else
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict2;
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
                                      GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                return rcStrict;
            }
        }
        else
        {
            /*
             * No informational status codes here, much more straightforward.
             */
            int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
            if (RT_SUCCESS(rc))
            {
                Assert(rc == VINF_SUCCESS);
                rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
                if (RT_SUCCESS(rc))
                    Assert(rc == VINF_SUCCESS);
                else
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
                    return rc;
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
                return rc;
            }
        }
    }
#ifdef VBOX_STRICT
    else
        memset(pbBuf, 0xcc, cbMem);
    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
#endif
    AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);

    /*
     * Commit the bounce buffer entry.
     */
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = GCPhysSecond;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbFirstPage;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = (uint16_t)cbSecondPage;
    pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = false;
    pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings++;

    *ppvMem = pbBuf;
    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    return VINF_SUCCESS;
}

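/*
 * Illustrative sketch (not part of the changeset): the page-split arithmetic
 * used at the top of iemMemBounceBufferMapCrossPage.  The first chunk runs to
 * the end of the first guest page and the remainder lands at the start of the
 * next one.  All names here are hypothetical.
 */
#if 0 /* example only */
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE         0x1000u
#define PAGE_OFFSET_MASK  0xfffu

static void splitCrossPage(uint64_t GCPtrFirst, uint32_t cbMem,
                           uint32_t *pcbFirstPage, uint32_t *pcbSecondPage)
{
    assert(cbMem <= PAGE_SIZE);                                     /* at most one boundary crossed */
    assert((GCPtrFirst & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE);    /* only called for crossing requests */
    *pcbFirstPage  = PAGE_SIZE - (uint32_t)(GCPtrFirst & PAGE_OFFSET_MASK);
    *pcbSecondPage = cbMem - *pcbFirstPage;
}
/* E.g. GCPtrFirst=0x1ffe, cbMem=8 gives cbFirstPage=2 and cbSecondPage=6. */
#endif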

/**
 * iemMemMap worker that deals with iemMemPageMap failures.
 */
VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
                                       RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) RT_NOEXCEPT
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);

    /*
     * Filter out conditions we can handle and the ones which shouldn't happen.
     */
    if (   rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
        && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
        && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
    {
        AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
        return rcMap;
    }
    pVCpu->iem.s.cPotentialExits++;

    /*
     * Read in the current memory content if it's a read, execute or partial
     * write access.
     */
    uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    {
        if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
            memset(pbBuf, 0xff, cbMem);
        else
        {
            if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
            {
                VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
                if (rcStrict == VINF_SUCCESS)
                { /* nothing */ }
                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
                                          GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict;
                }
            }
            else
            {
                int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
                if (RT_SUCCESS(rc))
                { /* likely */ }
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
                                          GCPhysFirst, rc));
                    return rc;
                }
            }
        }
    }
#ifdef VBOX_STRICT
    else
        memset(pbBuf, 0xcc, cbMem);
    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
#endif

    /*
     * Commit the bounce buffer entry.
     */
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = NIL_RTGCPHYS;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbMem;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = 0;
    pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
    pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings++;

    *ppvMem = pbBuf;
    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    return VINF_SUCCESS;
}

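/*
 * Illustrative sketch (not part of the changeset): the unmap-info byte both
 * map workers above produce.  Bits 0-2 hold the mapping index, bit 3 is a
 * validity marker, and bits 4-7 carry the access type, matching
 * "*pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4)"
 * and the "bUnmapInfo & 0x7" / "bUnmapInfo >> 4" decoding in the unmap
 * routines below.  The helper names are hypothetical.
 */
#if 0 /* example only */
#include <stdint.h>

static uint8_t unmapInfoEncode(unsigned iMemMap, uint32_t fAccessType)
{
    return (uint8_t)(iMemMap | 0x08 | (fAccessType << 4));
}

static unsigned unmapInfoIndex(uint8_t bUnmapInfo)
{
    return bUnmapInfo & 0x7;            /* mapping index */
}

static uint32_t unmapInfoAccessType(uint8_t bUnmapInfo)
{
    return (uint32_t)bUnmapInfo >> 4;   /* IEM_ACCESS_TYPE_* bits */
}
#endif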


/**
 * Commits the guest memory if bounce buffered and unmaps it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo          Unmap info set by iemMemMap.
 */
VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturn(   (bUnmapInfo & 0x08)
                    && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                    && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
                    ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
                    VERR_NOT_FOUND);

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
            return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}

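/*
 * Illustrative sketch (not part of the changeset): the overall map/commit
 * lifecycle these routines implement.  A hypothetical caller maps guest
 * memory, modifies it, then commits and unmaps.  The iemMemMap signature
 * shown here is an assumption inferred from the workers above, not quoted
 * from this changeset.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleIncrementDword(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    void    *pvMem      = NULL;
    uint8_t  bUnmapInfo = 0;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem, &bUnmapInfo, sizeof(uint32_t),
                                      iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW, 0 /*uAlignCtl*/);
    if (rcStrict == VINF_SUCCESS)
    {
        *(uint32_t *)pvMem += 1;                            /* modify the mapped bytes */
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); /* write back and free the slot */
    }
    return rcStrict;
}
#endif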

/**
 * Rolls back the guest memory (conceptually only) and unmaps it.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo          Unmap info set by iemMemMap.
 */
void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                        &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                           == ((unsigned)bUnmapInfo >> 4),
                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));

    /* Unlock it if necessary. */
    if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
}

#ifdef IEM_WITH_SETJMP

/**
 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo          Unmap info set by iemMemMap.
 */
void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                        &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                           == ((unsigned)bUnmapInfo >> 4),
                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
        {
            VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
            if (rcStrict == VINF_SUCCESS)
                return;
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
}


/** Fallback for iemMemCommitAndUnmapRwJmp.  */
void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapAtJmp.  */
void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapWoJmp.  */
void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapRoJmp.  */
void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemRollbackAndUnmapWo.  */
void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
}

#endif /* IEM_WITH_SETJMP */

#ifndef IN_RING3
/**
 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 * buffer part shows trouble, the write is postponed to ring-3 (setting
 * VMCPU_FF_IEM and the pending-write access flags).
 *
 * Allows the instruction to be completed and retired, while the IEM user will
 * return to ring-3 immediately afterwards and do the postponed writes there.
 *
 * @returns VBox status code (no strict statuses).  Caller must check
 *          VMCPU_FF_IEM before repeating string instructions and similar stuff.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo          Unmap info set by iemMemMap.
 */
VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturn(   (bUnmapInfo & 0x08)
                    && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                    &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                       == ((unsigned)bUnmapInfo >> 4),
                    ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
                    VERR_NOT_FOUND);

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
            return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}
#endif
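/*
 * Illustrative sketch (not part of the changeset): the postponement contract
 * used above.  Ring-0 marks which bounce-buffer parts still need writing and
 * raises a force flag; ring-3 later replays exactly those parts (see
 * IEMR3ProcessForceFlag further down).  All names are hypothetical stand-ins.
 */
#if 0 /* example only */
#include <stdbool.h>
#include <stdint.h>

#define PENDING_WRITE_1ST  UINT32_C(0x1)    /* stand-in for IEM_ACCESS_PENDING_R3_WRITE_1ST */
#define PENDING_WRITE_2ND  UINT32_C(0x2)    /* stand-in for IEM_ACCESS_PENDING_R3_WRITE_2ND */

/* Ring-0: the first part failed, so it and any second chunk must be replayed
   in ring-3; raise the force flag so the run loop visits ring-3. */
static uint32_t postponeFromFirstFailure(bool fHaveSecondPart, bool *pfForceFlag)
{
    *pfForceFlag = true;                    /* cf. VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM) */
    return fHaveSecondPart ? PENDING_WRITE_1ST | PENDING_WRITE_2ND
                           : PENDING_WRITE_1ST;
}
#endif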


/**
 * Rolls back mappings, releasing page locks and such.
 *
 * The caller shall only call this after checking cActiveMappings.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */
void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(pVCpu->iem.s.cActiveMappings > 0);

    uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
    {
        uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
        if (fAccess != IEM_ACCESS_INVALID)
        {
            AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
            if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
                PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
            AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
                      ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
                       iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
            pVCpu->iem.s.cActiveMappings--;
        }
    }
}

#undef  LOG_GROUP
#define LOG_GROUP LOG_GROUP_IEM

/** @} */


[…]

}

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPUCC pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}

    2149 
    2150 /**
    2151  * Called by force-flag handling code when VMCPU_FF_IEM is set.
    2152  *
    2153  * @returns Merge between @a rcStrict and what the commit operation returned.
    2154  * @param   pVM         The cross context VM structure.
    2155  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    2156  * @param   rcStrict    The status code returned by ring-0 or raw-mode.
    2157  */
    2158 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
    2159 {
    2160     /*
    2161      * Reset the pending commit.
    2162      */
    2163     AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
    2164               & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
    2165               ("%#x %#x %#x\n",
    2166                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    2167     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
    2168 
    2169     /*
    2170      * Commit the pending bounce buffers (usually just one).
    2171      */
    2172     unsigned cBufs = 0;
    2173     unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    2174     while (iMemMap-- > 0)
    2175         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
    2176         {
    2177             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    2178             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    2179             Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
    2180 
    2181             uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    2182             uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    2183             uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    2184 
    2185             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
    2186             {
    2187                 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
    2188                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    2189                                                             pbBuf,
    2190                                                             cbFirst,
    2191                                                             PGMACCESSORIGIN_IEM);
    2192                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
    2193                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
    2194                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2195                      VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
    2196             }
    2197 
    2198             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
    2199             {
    2200                 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
    2201                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    2202                                                             pbBuf + cbFirst,
    2203                                                             cbSecond,
    2204                                                             PGMACCESSORIGIN_IEM);
    2205                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
    2206                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
    2207                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
    2208                      VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
    2209             }
    2210             cBufs++;
    2211             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2212         }
    2213 
    2214     AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
    2215               ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
    2216                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    2217     pVCpu->iem.s.cActiveMappings = 0;
    2218     return rcStrict;
    2219 }
    2220 
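/*
 * Illustrative sketch (not part of the changeset): how the force flag raised
 * by the postponed-commit path is consumed.  A hypothetical ring-3 run-loop
 * step checks VMCPU_FF_IEM after returning from ring-0 and lets
 * IEMR3ProcessForceFlag() replay the pending bounce-buffer writes, merging
 * its status into the loop's own.  exampleRunGuestInRing0 is hypothetical.
 */
#if 0 /* example only */
extern VBOXSTRICTRC exampleRunGuestInRing0(PVM pVM, PVMCPUCC pVCpu);    /* hypothetical */

static VBOXSTRICTRC exampleRunLoopIteration(PVM pVM, PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = exampleRunGuestInRing0(pVM, pVCpu);
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);         /* replay postponed writes */
    return rcStrict;
}
#endif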
#endif /* IN_RING3 */
