VirtualBox

Timestamp:
Feb 17, 2025 12:28:23 AM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
167568
Message:

VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531

File:
1 edited

Legend:

  (no prefix)  Unmodified
  -            Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

r108245 → r108247
 #else
     return VINF_SUCCESS;
-#endif
-}
-
-
-#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
-/**
- * Worker for iemTlbInvalidateAll.
- */
-template<bool a_fGlobal>
-DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
-{
-    if (!a_fGlobal)
-        pTlb->cTlsFlushes++;
-    else
-        pTlb->cTlsGlobalFlushes++;
-
-    pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
-    if (RT_LIKELY(pTlb->uTlbRevision != 0))
-    { /* very likely */ }
-    else
-    {
-        pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
-        pTlb->cTlbRevisionRollovers++;
-        unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
-        while (i-- > 0)
-            pTlb->aEntries[i * 2].uTag = 0;
-    }
-
-    pTlb->cTlbNonGlobalLargePageCurLoads    = 0;
-    pTlb->NonGlobalLargePageRange.uLastTag  = 0;
-    pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
-
-    if (a_fGlobal)
-    {
-        pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
-        if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
-        { /* very likely */ }
-        else
-        {
-            pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
-            pTlb->cTlbRevisionRollovers++;
-            unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
-            while (i-- > 0)
-                pTlb->aEntries[i * 2 + 1].uTag = 0;
-        }
-
-        pTlb->cTlbGlobalLargePageCurLoads    = 0;
-        pTlb->GlobalLargePageRange.uLastTag  = 0;
-        pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
-    }
-}
-#endif
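The removed iemTlbInvalidateOne worker relies on a revision-counter trick: every TLB entry tag embeds the revision that was current when the entry was loaded, so bumping uTlbRevision makes all existing entries miss in O(1), and only a counter wraparound forces a walk that zeroes the stale tags. Below is a minimal standalone C sketch of that idea; all names (MYTLB, MY_REV_INCR, and so on) are hypothetical and the layout is simplified, so this illustrates the technique rather than the VirtualBox implementation.

    /* Hypothetical sketch of revision-based lazy TLB invalidation; not VBox code. */
    #include <stdint.h>
    #include <stdio.h>

    #define MY_TLB_ENTRIES  256
    #define MY_REV_INCR     (UINT64_C(1) << 36)  /* revision bits live above the page-tag bits */

    typedef struct MYTLBENTRY { uint64_t uTag; uint64_t uPhys; } MYTLBENTRY;
    typedef struct MYTLB      { uint64_t uRevision; MYTLBENTRY aEntries[MY_TLB_ENTRIES]; } MYTLB;

    /* A lookup hits only when the stored tag equals page-number | current-revision. */
    static int myTlbLookup(MYTLB const *pTlb, uint64_t uPage, uint64_t *puPhys)
    {
        MYTLBENTRY const *pEntry = &pTlb->aEntries[uPage % MY_TLB_ENTRIES];
        if (pEntry->uTag == (uPage | pTlb->uRevision))
        {
            *puPhys = pEntry->uPhys;
            return 1;
        }
        return 0; /* miss: wrong page, or a tag stamped with an older revision */
    }

    static void myTlbInsert(MYTLB *pTlb, uint64_t uPage, uint64_t uPhys)
    {
        MYTLBENTRY *pEntry = &pTlb->aEntries[uPage % MY_TLB_ENTRIES];
        pEntry->uTag  = uPage | pTlb->uRevision;
        pEntry->uPhys = uPhys;
    }

    /* Flush everything by bumping the revision; scrub tags only on wraparound. */
    static void myTlbInvalidateAll(MYTLB *pTlb)
    {
        pTlb->uRevision += MY_REV_INCR;
        if (pTlb->uRevision == 0) /* rollover: tags from 2^28 flushes ago could match again */
        {
            pTlb->uRevision = MY_REV_INCR;
            for (unsigned i = 0; i < MY_TLB_ENTRIES; i++)
                pTlb->aEntries[i].uTag = 0;
        }
    }

    int main(void)
    {
        static MYTLB Tlb = { .uRevision = MY_REV_INCR };
        uint64_t uPhys = 0;
        myTlbInsert(&Tlb, 42, 0x1000);
        printf("before flush: hit=%d\n", myTlbLookup(&Tlb, 42, &uPhys)); /* hit=1 */
        myTlbInvalidateAll(&Tlb);
        printf("after flush:  hit=%d\n", myTlbLookup(&Tlb, 42, &uPhys)); /* hit=0 */
        return 0;
    }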
-
-
-/**
- * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
- */
-template<bool a_fGlobal>
-DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
-{
-#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
-    Log10(("IEMTlbInvalidateAll\n"));
-
-# ifdef IEM_WITH_CODE_TLB
-    pVCpu->iem.s.cbInstrBufTotal = 0;
-    iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
-    if (a_fGlobal)
-        IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
-    else
-        IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
-# endif
-
-# ifdef IEM_WITH_DATA_TLB
-    iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
-    if (a_fGlobal)
-        IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
-    else
-        IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
-# endif
-#else
-    RT_NOREF(pVCpu);
-#endif
-}
-
-
-/**
- * Invalidates the non-global IEM TLB entries.
- *
- * This is called internally as well as by PGM when moving GC mappings.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling
- *                      thread.
- */
-VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
-{
-    iemTlbInvalidateAll<false>(pVCpu);
-}
-
-
-/**
- * Invalidates all the IEM TLB entries.
- *
- * This is called internally as well as by PGM when moving GC mappings.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling
- *                      thread.
- */
-VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
-{
-    iemTlbInvalidateAll<true>(pVCpu);
-}
-
-
-/**
- * Invalidates a page in the TLBs.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling
- *                      thread.
- * @param   GCPtr       The address of the page to invalidate.
- * @thread EMT(pVCpu)
- */
-VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
-{
-    IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
-#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
-    Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
-    GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
-    Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
-    uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
-
-# ifdef IEM_WITH_CODE_TLB
-    iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
-# endif
-# ifdef IEM_WITH_DATA_TLB
-    iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
-# endif
-#else
-    NOREF(pVCpu); NOREF(GCPtr);
-#endif
-}
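The IEMTLB_TAG_TO_EVEN_INDEX computation here, together with the aEntries[i * 2] / aEntries[i * 2 + 1] scrubbing in the rollover paths above, suggests each page tag maps to a pair of adjacent slots: the even slot is validated against the non-global revision and the odd slot against the global revision (translations with PTE.G=1, which survive CR3 reloads). A hypothetical sketch of single-page invalidation under such a layout; the struct and names are illustrative, not the VBox types:

    /* Hypothetical even/odd slot layout; illustrative only, not the VBox types. */
    #include <stdint.h>

    #define MY_TLB_PAIRS 128                     /* 128 pairs => 256 entries */

    typedef struct MYTLBENTRY { uint64_t uTag; } MYTLBENTRY;
    typedef struct MYTLB
    {
        uint64_t   uRevision;                    /* non-global revision (even slots) */
        uint64_t   uRevisionGlobal;              /* global revision (odd slots) */
        MYTLBENTRY aEntries[MY_TLB_PAIRS * 2];
    } MYTLB;

    /* Invalidate one page: probe both slots of its pair, each against its own
       revision; an entry stamped with a stale revision can never hit, so it
       can safely be left alone. */
    void myTlbInvalidatePage(MYTLB *pTlb, uint64_t uPageTag)
    {
        unsigned const idxEven = (unsigned)(uPageTag % MY_TLB_PAIRS) * 2;
        if (pTlb->aEntries[idxEven].uTag == (uPageTag | pTlb->uRevision))
            pTlb->aEntries[idxEven].uTag = 0;                   /* non-global slot */
        if (pTlb->aEntries[idxEven + 1].uTag == (uPageTag | pTlb->uRevisionGlobal))
            pTlb->aEntries[idxEven + 1].uTag = 0;               /* global slot */
    }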
-
-
-#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
-/**
- * Invalidates both TLBs in a slow fashion following a rollover.
- *
- * Worker for IEMTlbInvalidateAllPhysical,
- * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
- * iemMemMapJmp and others.
- *
- * @thread EMT(pVCpu)
- */
-void iemTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu) RT_NOEXCEPT
-{
-    Log10(("iemTlbInvalidateAllPhysicalSlow\n"));
-    ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
-    ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
-
-    unsigned i;
-# ifdef IEM_WITH_CODE_TLB
-    i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
-    while (i-- > 0)
-    {
-        pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3       = NULL;
-        pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
-                                                               | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
-    }
-    pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
-    pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
-# endif
-# ifdef IEM_WITH_DATA_TLB
-    i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
-    while (i-- > 0)
-    {
-        pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3       = NULL;
-        pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
-                                                               | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
-    }
-    pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
-    pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
-# endif
-}
-#endif
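The per-entry fFlagsAndPhysRev word evidently packs access flags and the physical revision into a single 64-bit value: the common physical flush is then just a revision bump, and only a wraparound (the slow path above) must visit every entry and mask the stale bits out. A simplified illustration of that packing, with hypothetical masks and bit positions:

    /* Hypothetical packing of flags + physical revision in one 64-bit word;
       the masks and bit positions here are illustrative, not VBox's. */
    #include <stdint.h>

    #define MY_F_NO_WRITE    UINT64_C(0x01)      /* low bits: access flags */
    #define MY_F_NO_READ     UINT64_C(0x02)
    #define MY_F_UNASSIGNED  UINT64_C(0x04)
    #define MY_PHYS_REV_MASK (~UINT64_C(0x3ff))  /* high bits: physical revision */

    /* One compare checks both "revision is current" and "no disqualifying
       flags set": the masked value can only equal uCurPhysRev when the
       stored revision matches and the selected flag bits are all clear. */
    int myTlbEntryUsableForRead(uint64_t fFlagsAndPhysRev, uint64_t uCurPhysRev)
    {
        return (fFlagsAndPhysRev & (MY_PHYS_REV_MASK | MY_F_NO_READ | MY_F_UNASSIGNED))
            == uCurPhysRev;
    }

    /* Rollover scrub: strip the stale revision and the physical-access flags
       so an ancient entry cannot falsely match a recycled revision value. */
    void myTlbEntryScrub(uint64_t *pfFlagsAndPhysRev)
    {
        *pfFlagsAndPhysRev &= ~(MY_PHYS_REV_MASK | MY_F_NO_WRITE | MY_F_NO_READ | MY_F_UNASSIGNED);
    }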
-
-
-/**
- * Invalidates the host physical aspects of the IEM TLBs.
- *
- * This is called internally as well as by PGM when moving GC mappings.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling
- *                      thread.
- * @note    Currently not used.
- */
-VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
-{
-#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
-    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
-    Log10(("IEMTlbInvalidateAllPhysical\n"));
-
-# ifdef IEM_WITH_CODE_TLB
-    pVCpu->iem.s.cbInstrBufTotal = 0;
-# endif
-    uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
-    if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
-    {
-        pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
-        pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
-        pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
-        pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
-    }
-    else
-        iemTlbInvalidateAllPhysicalSlow(pVCpu);
-#else
-    NOREF(pVCpu);
-#endif
-}
-
-
-/**
- * Invalidates the host physical aspects of the IEM TLBs.
- *
- * This is called internally as well as by PGM when moving GC mappings.
- *
- * @param   pVM         The cross context VM structure.
- * @param   idCpuCaller The ID of the calling EMT if available to the caller,
- *                      otherwise NIL_VMCPUID.
- * @param   enmReason   The reason we're called.
- *
- * @remarks Caller holds the PGM lock.
- */
-VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
-{
-#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
-    PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
-    if (pVCpuCaller)
-        VMCPU_ASSERT_EMT(pVCpuCaller);
-    Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
-
-    VMCC_FOR_EACH_VMCPU(pVM)
-    {
-# ifdef IEM_WITH_CODE_TLB
-        if (pVCpuCaller == pVCpu)
-            pVCpu->iem.s.cbInstrBufTotal = 0;
-# endif
-
-        uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
-        uint64_t       uTlbPhysRevNew  = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
-        if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
-        { /* likely */ }
-        else if (pVCpuCaller != pVCpu)
-            uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
-        else
-        {
-            iemTlbInvalidateAllPhysicalSlow(pVCpu);
-            continue;
-        }
-        if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
-            pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
-
-        if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
-            pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
-    }
-    VMCC_FOR_EACH_VMCPU_END(pVM);
-
-#else
-    RT_NOREF(pVM, idCpuCaller, enmReason);
 #endif
 }
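Finally, the all-CPUs variant bumps each VCpu's physical revision with a single compare-and-swap instead of taking a lock: if the CAS loses a race with the owning EMT, the revision moved anyway, so the failure can be ignored. A hedged sketch of that pattern in portable C11 atomics (not the VBox/IPRT ASMAtomic* API), with the near-rollover handling reduced to a boolean "run the slow path yourself" result:

    /* Hedged sketch of the lock-free revision bump in portable C11 atomics;
       the names and increment are hypothetical, not the VBox/IPRT API. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MY_PHYS_REV_INCR (UINT64_C(1) << 10)

    typedef struct MYCPU { _Atomic uint64_t uTlbPhysRev; } MYCPU;

    /* Bump one CPU's physical TLB revision.  Returns false only when the
       revision is about to wrap on the caller's own CPU, in which case the
       caller must run its slow entry-scrubbing path instead. */
    bool myBumpPhysRev(MYCPU *pCpu, bool fIsSelf)
    {
        uint64_t uOld = atomic_load_explicit(&pCpu->uTlbPhysRev, memory_order_relaxed);
        uint64_t uNew = uOld + MY_PHYS_REV_INCR;
        if (uNew <= MY_PHYS_REV_INCR * 2)        /* wrap zone */
        {
            if (fIsSelf)
                return false;                    /* scrub our own TLB entries */
            uNew = MY_PHYS_REV_INCR;             /* remote CPU: park at the base value
                                                    so its next bump takes the slow path */
        }
        if (!atomic_compare_exchange_strong(&pCpu->uTlbPhysRev, &uOld, uNew))
        { /* Lost a race: the concurrent update moved the revision anyway. */ }
        return true;
    }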