VirtualBox

Changeset 104956 in vbox


Ignore:
Timestamp:
Jun 18, 2024 11:44:59 AM (9 months ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
163564
Message:

VMM/IEM: TLB statistics reorg. bugref:10687

Location:
trunk/src/VBox/VMM
Files:
10 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r104941 r104956  
    936936        {
    937937            /* likely when executing lots of code, otherwise unlikely */
    938 # ifdef VBOX_WITH_STATISTICS
    939             pVCpu->iem.s.CodeTlb.cTlbHits++;
     938# ifdef IEM_WITH_TLB_STATISTICS
     939            pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
    940940# endif
    941941            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
     
    973973        else
    974974        {
    975             pVCpu->iem.s.CodeTlb.cTlbMisses++;
     975            pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
    976976
    977977            /* This page table walking will set A bits as required by the access while performing the walk.
     
    11051105        else
    11061106        {
    1107             pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
     1107            pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
    11081108
    11091109            /* Check instruction length. */
     
    63426342        && !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0))) )
    63436343    {
    6344         STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     6344# ifdef IEM_WITH_TLB_STATISTICS
     6345        pVCpu->iem.s.DataTlb.cTlbCoreHits++;
     6346#endif
    63456347
    63466348        /* If the page is either supervisor only or non-writable, we need to do
     
    63946396    else
    63956397    {
    6396         pVCpu->iem.s.DataTlb.cTlbMisses++;
     6398        pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
    63976399
    63986400        /* This page table walking will set A bits as required by the access while performing the walk.
     
    66186620 *                            IEM_MEMMAP_F_ALIGN_GP_OR_AC.
    66196621 *                      Pass zero to skip alignment.
    6620  */
    6621 void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
    6622                    uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
     6622 * @tparam  a_fSafe     Whether this is a call from "safe" fallback function in
     6623 *                      IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
     6624 *                      needs counting as such in the statistics.
     6625 */
     6626template<bool a_fSafeCall = false>
     6627static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
     6628                          uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
    66236629{
    66246630    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
     
    67396745    if (   pTlbe->uTag == uTag
    67406746        && !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY))) )
    6741         STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     6747    {
     6748# ifdef IEM_WITH_TLB_STATISTICS
     6749        if (a_fSafeCall)
     6750            pVCpu->iem.s.DataTlb.cTlbSafeHits++;
     6751        else
     6752            pVCpu->iem.s.DataTlb.cTlbCoreHits++;
     6753# endif
     6754    }
    67426755    else
    67436756    {
    6744         pVCpu->iem.s.DataTlb.cTlbMisses++;
     6757        if (a_fSafeCall)
     6758            pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
     6759        else
     6760            pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
    67456761
    67466762        /* This page table walking will set A and D bits as required by the
     
    69416957    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    69426958    return pvMem;
     6959}
     6960
     6961
     6962/** @see iemMemMapJmp */
     6963static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
     6964                              uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
     6965{
     6966    return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
    69436967}
    69446968
  • trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h

    r103908 r104956  
    8484#  endif
    8585    uint8_t              bUnmapInfo;
    86     TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
    87                                                                     IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     86    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
     87                                                                        IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    8888    *pDst = *pSrc;
    8989    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
     
    9898#  endif
    9999    uint8_t              bUnmapInfo;
    100     TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
    101                                                                      IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     100    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
     101                                                                         IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    102102    TMPL_MEM_TYPE const  uRet = *puSrc;
    103103    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
     
    172172#endif
    173173    uint8_t        bUnmapInfo;
    174     TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puDst), iSegReg, GCPtrMem,
    175                                                          IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     174    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*puDst), iSegReg, GCPtrMem,
     175                                                             IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    176176#ifdef TMPL_MEM_BY_REF
    177177    *puDst = *pValue;
     
    205205    Log8(("IEM AT/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    206206    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
    207     return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
    208                                          IEM_ACCESS_DATA_ATOMIC, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     207    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
     208                                             IEM_ACCESS_DATA_ATOMIC, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    209209}
    210210
     
    229229    Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    230230    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
    231     return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
    232                                          IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     231    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
     232                                             IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    233233}
    234234
     
    253253    Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    254254    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */
    255     return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
    256                                          IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     255    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
     256                                             IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    257257}
    258258
     
    277277    Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    278278    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
    279     return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
    280                                          IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     279    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
     280                                             IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    281281}
    282282
     
    496496
    497497    uint8_t        bUnmapInfo;
    498     TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrMem,
    499                                                          IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     498    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrMem,
     499                                                             IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    500500    *puDst = uValue;
    501501    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
     
    529529        {
    530530            /* WORD per intel specs. */
    531             uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrMem,
    532                                                        IEM_ACCESS_STACK_W, (sizeof(uint16_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD); /** @todo 2 or 4 alignment check for PUSH SS? */
     531            uint16_t *puDst = (uint16_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrMem,
     532                                                           IEM_ACCESS_STACK_W, (sizeof(uint16_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD); /** @todo 2 or 4 alignment check for PUSH SS? */
    533533            *puDst = (uint16_t)uValue;
    534534            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
     
    541541             *        than FS (which all that bs3-cpu-weird-1 does atm).  (Maybe this is
    542542             *        something for the CPU profile... Hope not.) */
    543             uint32_t *puDst = (uint32_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
    544                                                        IEM_ACCESS_STACK_W, (sizeof(uint32_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD);
     543            uint32_t *puDst = (uint32_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
     544                                                           IEM_ACCESS_STACK_W, (sizeof(uint32_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD);
    545545            *puDst = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
    546546            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
     
    551551    {
    552552        /* DWORD per spec. */
    553         uint32_t *puDst = (uint32_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
    554                                                    IEM_ACCESS_STACK_W, (sizeof(uint32_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD);
     553        uint32_t *puDst = (uint32_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
     554                                                       IEM_ACCESS_STACK_W, (sizeof(uint32_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD);
    555555        *puDst = uValue;
    556556        iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
     
    572572    /* Read the data. */
    573573    uint8_t              bUnmapInfo;
    574     TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
    575                                                                      GCPtrMem, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     574    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
     575                                                                         GCPtrMem, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    576576    TMPL_MEM_TYPE const  uValue = *puSrc;
    577577    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
     
    598598    /* Write the data. */
    599599    uint8_t        bUnmapInfo;
    600     TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
    601                                                          IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     600    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
     601                                                             IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    602602    *puDst = uValue;
    603603    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
     
    625625    /* Read the data. */
    626626    uint8_t              bUnmapInfo;
    627     TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
    628                                                                      GCPtrTop, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     627    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
     628                                                                         GCPtrTop, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    629629    TMPL_MEM_TYPE const  uValue = *puSrc;
    630630    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
     
    666666     * ancient hardware when it actually did change. */
    667667    uint8_t   bUnmapInfo;
    668     uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
    669                                                IEM_ACCESS_STACK_W, (sizeof(uint16_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD); /** @todo 2 or 4 alignment check for PUSH SS? */
     668    uint16_t *puDst = (uint16_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
     669                                                   IEM_ACCESS_STACK_W, (sizeof(uint16_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD); /** @todo 2 or 4 alignment check for PUSH SS? */
    670670    *puDst = (uint16_t)uValue;
    671671    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
  • trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h

    r102977 r104956  
    127127                 * Fetch and return the data.
    128128                 */
    129                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     129#  ifdef IEM_WITH_TLB_STATISTICS
     130                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     131#  endif
    130132                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    131133                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    198200                 * Fetch and return the dword
    199201                 */
    200                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     202#  ifdef IEM_WITH_TLB_STATISTICS
     203                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     204#  endif
    201205                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    202206                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    274278                 * Store the value and return.
    275279                 */
    276                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     280#   ifdef IEM_WITH_TLB_STATISTICS
     281                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     282#   endif
    277283                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    278284                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    345351                 * Store the value and return.
    346352                 */
    347                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     353#   ifdef IEM_WITH_TLB_STATISTICS
     354                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     355#   endif
    348356                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    349357                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    419427                 * Return the address.
    420428                 */
    421                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     429#   ifdef IEM_WITH_TLB_STATISTICS
     430                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     431#   endif
    422432                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    423433                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    476486                 * Return the address.
    477487                 */
    478                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     488#   ifdef IEM_WITH_TLB_STATISTICS
     489                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     490#   endif
    479491                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    480492                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    535547                 * Return the address.
    536548                 */
    537                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     549#   ifdef IEM_WITH_TLB_STATISTICS
     550                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     551#   endif
    538552                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    539553                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    592606                 * Return the address.
    593607                 */
    594                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     608#   ifdef IEM_WITH_TLB_STATISTICS
     609                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     610#   endif
    595611                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    596612                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    649665                 * Return the address.
    650666                 */
    651                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     667#   ifdef IEM_WITH_TLB_STATISTICS
     668                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     669#   endif
    652670                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    653671                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    704722                 * Return the address.
    705723                 */
    706                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     724#   ifdef IEM_WITH_TLB_STATISTICS
     725                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     726#   endif
    707727                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    708728                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    759779                 * Return the address.
    760780                 */
    761                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     781#   ifdef IEM_WITH_TLB_STATISTICS
     782                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     783#   endif
    762784                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    763785                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    813835                 * Return the address.
    814836                 */
    815                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     837#  ifdef IEM_WITH_TLB_STATISTICS
     838                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     839#  endif
    816840                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    817841                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    882906                 * Do the store and return.
    883907                 */
    884                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     908#   ifdef IEM_WITH_TLB_STATISTICS
     909                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     910#   endif
    885911                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    886912                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    943969                 * Do the push and return.
    944970                 */
    945                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     971#      ifdef IEM_WITH_TLB_STATISTICS
     972                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     973#      endif
    946974                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    947975                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    10041032                 * Do the push and return.
    10051033                 */
    1006                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1034#    ifdef IEM_WITH_TLB_STATISTICS
     1035                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1036#    endif
    10071037                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    10081038                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    10611091                 * Do the push and return.
    10621092                 */
    1063                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1093#     ifdef IEM_WITH_TLB_STATISTICS
     1094                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1095#     endif
    10641096                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    10651097                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    11171149                 * Do the pop.
    11181150                 */
    1119                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1151#   ifdef IEM_WITH_TLB_STATISTICS
     1152                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1153#   endif
    11201154                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    11211155                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    11691203                 * Do the pop.
    11701204                 */
    1171                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1205#    ifdef IEM_WITH_TLB_STATISTICS
     1206                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1207#    endif
    11721208                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    11731209                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    12261262                 * Do the push and return.
    12271263                 */
    1228                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1264#   ifdef IEM_WITH_TLB_STATISTICS
     1265                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1266#   endif
    12291267                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    12301268                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    12861324                 * Do the pop.
    12871325                 */
    1288                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1326#  ifdef IEM_WITH_TLB_STATISTICS
     1327                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1328#  endif
    12891329                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    12901330                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    13221362RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    13231363{
    1324 if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     1364  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    13251365    /* See fallback for details on this weirdness: */
    13261366    bool const    fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
     
    13341374    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
    13351375    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, cbAccess, GCPtrTop);
    1336 if TMPL_MEM_TYPE_SIZE > 1
     1376   if TMPL_MEM_TYPE_SIZE > 1
    13371377    if (RT_LIKELY(   !(GCPtrEff & (cbAccess - 1U))
    13381378                  || (   cbAccess == sizeof(uint16_t)
    13391379                      ? TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t)
    13401380                      : TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ) ))
    1341 endif
     1381  endif
    13421382    {
    13431383        /*
     
    13621402                 * Do the push and return.
    13631403                 */
    1364                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1404#    ifdef IEM_WITH_TLB_STATISTICS
     1405                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1406#    endif
    13651407                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    13661408                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    13951437       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    13961438    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
    1397 endif
     1439  endif
    13981440    RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
    13991441}
     
    14121454           && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
    14131455           && pVCpu->cpum.GstCtx.ss.u64Base == 0);
    1414 if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     1456  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    14151457    /*
    14161458     * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
    14171459     */
    14181460    uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
    1419 if TMPL_MEM_TYPE_SIZE > 1
     1461   if TMPL_MEM_TYPE_SIZE > 1
    14201462    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewEsp)))
    1421 endif
     1463   endif
    14221464    {
    14231465        /*
     
    14421484                 * Do the push and return.
    14431485                 */
    1444                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1486#     ifdef IEM_WITH_TLB_STATISTICS
     1487                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1488#     endif
    14451489                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    14461490                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    14571501       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    14581502    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
    1459 endif
     1503  endif
    14601504    RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
    14611505}
     
    14691513{
    14701514    Assert(iGReg < 16);
    1471 if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     1515  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    14721516    /*
    14731517     * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
    14741518     */
    14751519    uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
    1476 #   if TMPL_MEM_TYPE_SIZE > 1
     1520#     if TMPL_MEM_TYPE_SIZE > 1
    14771521    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldEsp)))
    1478 #   endif
     1522#     endif
    14791523    {
    14801524        /*
     
    14981542                 * Do the pop and update the register values.
    14991543                 */
    1500                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1544#     ifdef IEM_WITH_TLB_STATISTICS
     1545                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1546#     endif
    15011547                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    15021548                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
    15031549                TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
    15041550                pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
    1505 #   if TMPL_MEM_TYPE_SIZE == 2
     1551#     if TMPL_MEM_TYPE_SIZE == 2
    15061552                pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
    1507 #   elif TMPL_MEM_TYPE_SIZE == 4
     1553#     elif TMPL_MEM_TYPE_SIZE == 4
    15081554                pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = uValue;
    1509 #   else
    1510 #    error "TMPL_MEM_TYPE_SIZE"
    1511 #   endif
     1555#     else
     1556#      error "TMPL_MEM_TYPE_SIZE"
     1557#     endif
    15121558                Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE " (r%u)\n",
    15131559                                          uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
     
    15201566       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    15211567    Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uOldEsp));
    1522 endif
     1568  endif
    15231569    RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
    15241570}
     
    15361582RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    15371583{
    1538 if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     1584  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    15391585    /* See fallback for details on this weirdness: */
    15401586    bool const    fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
     
    15711617                 * Do the push and return.
    15721618                 */
    1573                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1619#     ifdef IEM_WITH_TLB_STATISTICS
     1620                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1621#     endif
    15741622                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    15751623                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    16041652       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    16051653    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
    1606 endif
     1654  endif
    16071655    RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
    16081656}
     
    16471695                 * Do the push and return.
    16481696                 */
    1649                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1697#     ifdef IEM_WITH_TLB_STATISTICS
     1698                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1699#     endif
    16501700                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    16511701                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    17031753                 * Do the push and return.
    17041754                 */
    1705                 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     1755#     ifdef IEM_WITH_TLB_STATISTICS
     1756                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1757#     endif
    17061758                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    17071759                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp

    r104877 r104956  
    13061306                                            idxLabelTlbLookup, idxLabelTlbMiss, idxRegGCPhys, offInstr);
    13071307
    1308 # ifdef VBOX_WITH_STATISTICS
     1308# ifdef IEM_WITH_TLB_STATISTICS
    13091309        off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
    13101310                                                RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeCodeTlbHitsForNewPageWithOffset));
     
    16001600                                                  idxLabelTlbLookup, idxLabelTlbMiss, idxRegDummy);
    16011601
    1602 # ifdef VBOX_WITH_STATISTICS
     1602# ifdef IEM_WITH_TLB_STATISTICS
    16031603        off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
    16041604                                                RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeCodeTlbHitsForNewPage));
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h

    r104797 r104956  
    11641164         */
    11651165        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 64);
    1166 # ifdef VBOX_WITH_STATISTICS
     1166# ifdef IEM_WITH_TLB_STATISTICS
    11671167        off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, TlbState.idxReg1, TlbState.idxReg2,
    11681168                                                  RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeTlbHitsForStack));
     
    18471847         */
    18481848        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
    1849 # ifdef VBOX_WITH_STATISTICS
     1849# ifdef IEM_WITH_TLB_STATISTICS
    18501850        off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, TlbState.idxReg1, TlbState.idxReg2,
    18511851                                                  RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeTlbHitsForStack));
     
    66786678         */
    66796679        PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 64);
    6680 # ifdef VBOX_WITH_STATISTICS
     6680# ifdef IEM_WITH_TLB_STATISTICS
    66816681        off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, TlbState.idxReg1, TlbState.idxReg2,
    66826682                                                  enmOp == kIemNativeEmitMemOp_Store
     
    75657565         */
    75667566        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 64);
    7567 # ifdef VBOX_WITH_STATISTICS
     7567# ifdef IEM_WITH_TLB_STATISTICS
    75687568        off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, TlbState.idxReg1, TlbState.idxReg2,
    75697569                                                  RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeTlbHitsForStack));
     
    79127912         */
    79137913        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
    7914 # ifdef VBOX_WITH_STATISTICS
     7914# ifdef IEM_WITH_TLB_STATISTICS
    79157915        off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, TlbState.idxReg1, TlbState.idxReg2,
    79167916                                                  RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeTlbHitsForStack));
     
    84978497        off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, fAlignMask, fAccess,
    84988498                                           idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
    8499 # ifdef VBOX_WITH_STATISTICS
     8499# ifdef IEM_WITH_TLB_STATISTICS
    85008500        off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
    85018501                                                RT_UOFFSETOF(VMCPUCC,  iem.s.StatNativeTlbHitsForMapped));
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r104858 r104956  
    86098609        ENTRY(iem.s.DataTlb.uTlbRevision),
    86108610        ENTRY(iem.s.DataTlb.uTlbPhysRev),
    8611         ENTRY(iem.s.DataTlb.cTlbHits),
     8611        ENTRY(iem.s.DataTlb.cTlbCoreHits),
     8612        ENTRY(iem.s.DataTlb.cTlbInlineCodeHits),
     8613        ENTRY(iem.s.DataTlb.cTlbNativeMissTag),
     8614        ENTRY(iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev),
     8615        ENTRY(iem.s.DataTlb.cTlbNativeMissAlignment),
     8616        ENTRY(iem.s.DataTlb.cTlbNativeMissCrossPage),
     8617        ENTRY(iem.s.DataTlb.cTlbNativeMissNonCanonical),
    86128618        ENTRY(iem.s.DataTlb.aEntries),
    86138619        ENTRY(iem.s.CodeTlb.uTlbRevision),
    86148620        ENTRY(iem.s.CodeTlb.uTlbPhysRev),
    8615         ENTRY(iem.s.CodeTlb.cTlbHits),
     8621        ENTRY(iem.s.CodeTlb.cTlbCoreHits),
     8622        ENTRY(iem.s.CodeTlb.cTlbNativeMissTag),
     8623        ENTRY(iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev),
     8624        ENTRY(iem.s.CodeTlb.cTlbNativeMissAlignment),
     8625        ENTRY(iem.s.CodeTlb.cTlbNativeMissCrossPage),
     8626        ENTRY(iem.s.CodeTlb.cTlbNativeMissNonCanonical),
    86168627        ENTRY(iem.s.CodeTlb.aEntries),
    86178628        ENTRY(pVMR3),
  • trunk/src/VBox/VMM/VMMR3/IEMR3.cpp

    r104941 r104956  
    297297        char   szPat[128];
    298298        RT_NOREF_PV(szPat); /* lazy bird */
     299        char   szVal[128];
     300        RT_NOREF_PV(szVal); /* lazy bird */
    299301
    300302        STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions,               STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     
    319321                        "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
    320322
    321         STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbMisses,          STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    322                         "Code TLB misses",                          "/IEM/CPU%u/CodeTlb-Misses", idCpu);
     323        /* Code TLB: */
    323324        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision,        STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
    324                         "Code TLB revision",                        "/IEM/CPU%u/CodeTlb-Revision", idCpu);
     325                        "Code TLB revision",                            "/IEM/CPU%u/Tlb/Code/Revision", idCpu);
    325326        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
    326                         "Code TLB physical revision",               "/IEM/CPU%u/CodeTlb-PhysRev", idCpu);
    327         STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowReadPath,    STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    328                         "Code TLB slow read path",                  "/IEM/CPU%u/CodeTlb-SlowReads", idCpu);
    329 
    330         STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbMisses,          STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    331                         "Data TLB misses",                          "/IEM/CPU%u/DataTlb-Misses", idCpu);
    332         STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath,    STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    333                         "Data TLB safe read path",                  "/IEM/CPU%u/DataTlb-SafeReads", idCpu);
    334         STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath,   STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    335                         "Data TLB safe write path",                 "/IEM/CPU%u/DataTlb-SafeWrites", idCpu);
     327                        "Code TLB physical revision",                   "/IEM/CPU%u/Tlb/Code/RevisionPhysical", idCpu);
     328
     329        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses,      STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     330                        "Code TLB misses",                              "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
     331        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     332                        "Code TLB slow read path",                      "/IEM/CPU%u/Tlb/Code/SlowReads", idCpu);
     333# ifdef IEM_WITH_TLB_STATISTICS
     334        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreHits,        STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     335                        "Code TLB hits (non-native)",                   "/IEM/CPU%u/Tlb/Code/Hits/Other", idCpu);
     336#  if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
     337        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     338                        "Code TLB native hits on new page",             "/IEM/CPU%u/Tlb/Code/Hits/New-Page", idCpu);
     339        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     340                        "Code TLB native hits on new page /w offset",   "/IEM/CPU%u/Tlb/Code/Hits/New-Page-With-Offset", idCpu);
     341#  endif
     342
     343        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits/*", idCpu);
     344        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB hits",
     345                          "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
     346
     347        RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
     348        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
     349        STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
     350                               "Code TLB actual miss rate",             "/IEM/CPU%u/Tlb/Code/RateMisses", idCpu);
     351
     352#  if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
     353        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissTag,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     354                        "Code TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
     355                        "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Tag", idCpu);
     356        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     357                        "Code TLB misses in native code: Flags or physical revision mismatch [not directly included grand parent sum]",
     358                        "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
     359        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissAlignment,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     360                        "Code TLB misses in native code: Alignment [not directly included grand parent sum]",
     361                        "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Alignment", idCpu);
     362        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissCrossPage,  STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     363                        "Code TLB misses in native code: Cross page [not directly included grand parent sum]",
     364                        "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/CrossPage", idCpu);
     365        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissNonCanonical,  STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     366                        "Code TLB misses in native code: Non-canonical [not directly included grand parent sum]",
     367                        "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/NonCanonical", idCpu);
     368
     369        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     370                        "Code TLB native misses on new page",
     371                        "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page", idCpu);
     372        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     373                        "Code TLB native misses on new page w/ offset",
     374                        "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page-With-Offset", idCpu);
     375#  endif
     376# endif /* IEM_WITH_TLB_STATISTICS */
     377
     378        /* Data TLB organized as best we can... */
    336379        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision,        STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
    337                         "Data TLB revision",                        "/IEM/CPU%u/DataTlb-Revision", idCpu);
     380                        "Data TLB revision",                            "/IEM/CPU%u/Tlb/Data/Revision", idCpu);
    338381        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
    339                         "Data TLB physical revision",               "/IEM/CPU%u/DataTlb-PhysRev", idCpu);
    340 
    341 # ifdef VBOX_WITH_STATISTICS
    342         STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbHits,            STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    343                         "Code TLB hits",                            "/IEM/CPU%u/CodeTlb-Hits", idCpu);
    344         STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbHits,            STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    345                         "Data TLB hits",                            "/IEM/CPU%u/DataTlb-Hits-Other", idCpu);
     382                        "Data TLB physical revision",                   "/IEM/CPU%u/Tlb/Data/RevisionPhysical", idCpu);
     383
     384        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreMisses,      STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     385                        "Data TLB core misses (iemMemMap, direct iemMemMapJmp (not safe path))",
     386                        "/IEM/CPU%u/Tlb/Data/Misses/Core", idCpu);
     387        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath,    STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     388                        "Data TLB safe read path (inline/native misses going to iemMemMapJmp)",
     389                        "/IEM/CPU%u/Tlb/Data/Misses/Safe/Reads", idCpu);
     390        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath,   STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     391                        "Data TLB safe write path (inline/native misses going to iemMemMapJmp)",
     392                        "/IEM/CPU%u/Tlb/Data/Misses/Safe/Writes", idCpu);
     393        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/*", idCpu);
     394        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB misses",
     395                          "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
     396
     397        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/Safe/*", idCpu);
     398        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB actual safe path calls (read + write)",
     399                          "/IEM/CPU%u/Tlb/Data/Misses/Safe", idCpu);
     400        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeHits,        STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     401                        "Data TLB hits in iemMemMapJmp - not part of safe-path total",
     402                        "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartHits", idCpu);
     403        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeMisses,      STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     404                        "Data TLB misses in iemMemMapJmp - not part of safe-path total",
     405                        "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses", idCpu);
     406
     407# ifdef IEM_WITH_TLB_STATISTICS
     408#  ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
     409        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissTag,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     410                        "Data TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
     411                        "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Tag", idCpu);
     412        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     413                        "Data TLB misses in native code: Flags or physical revision mismatch [not directly included grand parent sum]",
     414                        "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
     415        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissAlignment,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     416                        "Data TLB misses in native code: Alignment [not directly included grand parent sum]",
     417                        "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Alignment", idCpu);
     418        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissCrossPage,  STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     419                        "Data TLB misses in native code: Cross page [not directly included grand parent sum]",
     420                        "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/CrossPage", idCpu);
     421        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissNonCanonical,  STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     422                        "Data TLB misses in native code: Non-canonical [not directly included grand parent sum]",
     423                        "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/NonCanonical", idCpu);
     424#  endif
     425# endif
     426
     427# ifdef IEM_WITH_TLB_STATISTICS
     428        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreHits,        STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     429                        "Data TLB core hits (iemMemMap, direct iemMemMapJmp (not safe path))",
     430                        "/IEM/CPU%u/Tlb/Data/Hits/Core", idCpu);
     431        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInlineCodeHits,  STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     432                        "Data TLB hits in IEMAllMemRWTmplInline.cpp.h",
     433                        "/IEM/CPU%u/Tlb/Data/Hits/Inline", idCpu);
    346434#  ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
    347435        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    348                         "Data TLB native stack access hits",        "/IEM/CPU%u/DataTlb-Hits-Native-Stack", idCpu);
     436                        "Data TLB native stack access hits",
     437                        "/IEM/CPU%u/Tlb/Data/Hits/Native/Stack", idCpu);
    349438        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    350                         "Data TLB native data fetch hits",          "/IEM/CPU%u/DataTlb-Hits-Native-Fetch", idCpu);
     439                        "Data TLB native data fetch hits",
     440                        "/IEM/CPU%u/Tlb/Data/Hits/Native/Fetch", idCpu);
    351441        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    352                         "Data TLB native data store hits",          "/IEM/CPU%u/DataTlb-Hits-Native-Store", idCpu);
     442                        "Data TLB native data store hits",
     443                        "/IEM/CPU%u/Tlb/Data/Hits/Native/Store", idCpu);
    353444        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    354                         "Data TLB native mapped data hits",         "/IEM/CPU%u/DataTlb-Hits-Native-Mapped", idCpu);
     445                        "Data TLB native mapped data hits",
     446                        "/IEM/CPU%u/Tlb/Data/Hits/Native/Mapped", idCpu);
    355447#  endif
    356         RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Hits-*", idCpu);
    357         STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
    358                           "Data TLB hits total",                    "/IEM/CPU%u/DataTlb-Hits", idCpu);
    359 
    360         RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Safe*", idCpu);
    361         STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
    362                           "Data TLB actual misses",                 "/IEM/CPU%u/DataTlb-SafeTotal", idCpu);
    363         char szVal[128];
    364         RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/DataTlb-SafeTotal", idCpu);
    365         RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Hits-*", idCpu);
     448        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/*", idCpu);
     449        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits",
     450                          "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
     451
     452#  ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
     453        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/Native/*", idCpu);
     454        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits from native code",
     455                          "/IEM/CPU%u/Tlb/Data/Hits/Native", idCpu);
     456#  endif
     457
     458        RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Hits|/IEM/CPU%u/Tlb/Data/Misses", idCpu, idCpu);
     459        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB lookups (sum of hits and misses)",
     460                          "/IEM/CPU%u/Tlb/Data/Hits/AllLookups", idCpu);
     461
     462        RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
     463        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
    366464        STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
    367                                "Data TLB actual miss rate",         "/IEM/CPU%u/DataTlb-SafeRate", idCpu);
    368 
    369 #  ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
    370         STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    371                         "Code TLB native misses on new page",           "/IEM/CPU%u/CodeTlb-Misses-New-Page", idCpu);
    372         STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    373                         "Code TLB native misses on new page w/ offset", "/IEM/CPU%u/CodeTlb-Misses-New-Page-With-Offset", idCpu);
    374         STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    375                         "Code TLB native hits on new page",   "/IEM/CPU%u/CodeTlb-Hits-New-Page", idCpu);
    376         STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    377                         "Code TLB native hits on new page /w offset",   "/IEM/CPU%u/CodeTlb-Hits-New-Page-With-Offset", idCpu);
    378 #  endif
    379 # endif
     465                               "Data TLB actual miss rate",             "/IEM/CPU%u/Tlb/Data/RateMisses", idCpu);
     466
     467# endif /* IEM_WITH_TLB_STATISTICS */
     468
    380469
    381470#ifdef VBOX_WITH_IEM_RECOMPILER
  • trunk/src/VBox/VMM/include/IEMInternal-armv8.h

    r100966 r104956  
    336336    uint32_t            cTlbMisses;
    337337    /** Slow read path.  */
    338     uint32_t            cTlbSlowReadPath;
     338    uint32_t            cTlbSlowCodeReadPath;
    339339#if 0
    340340    /** TLB misses because of tag mismatch. */
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r104947 r104956  
    111111 * Delay the writeback or dirty registers as long as possible. */
    112112# define IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
     113#endif
     114
     115/** @def IEM_WITH_TLB_STATISTICS
     116 * Enables all TLB statistics. */
     117#if defined(VBOX_WITH_STATISTICS) || defined(DOXYGEN_RUNNING)
     118# define IEM_WITH_TLB_STATISTICS
    113119#endif
    114120
     
    552558    /* Statistics: */
    553559
    554     /** TLB hits (VBOX_WITH_STATISTICS only). */
    555     uint64_t            cTlbHits;
    556     /** TLB misses. */
    557     uint32_t            cTlbMisses;
    558     /** Slow read path.  */
    559     uint32_t            cTlbSlowReadPath;
    560     /** Safe read path.  */
    561     uint32_t            cTlbSafeReadPath;
    562     /** Safe write path.  */
    563     uint32_t            cTlbSafeWritePath;
    564 #if 0
    565     /** TLB misses because of tag mismatch. */
    566     uint32_t            cTlbMissesTag;
    567     /** TLB misses because of virtual access violation. */
    568     uint32_t            cTlbMissesVirtAccess;
    569     /** TLB misses because of dirty bit. */
    570     uint32_t            cTlbMissesDirty;
    571     /** TLB misses because of MMIO */
    572     uint32_t            cTlbMissesMmio;
    573     /** TLB misses because of write access handlers. */
    574     uint32_t            cTlbMissesWriteHandler;
    575     /** TLB misses because no r3(/r0) mapping. */
    576     uint32_t            cTlbMissesMapping;
    577 #endif
     560    /** TLB hits in IEMAll.cpp code (IEM_WITH_TLB_STATISTICS only; both).
     561     * @note For the data TLB this is only used in iemMemMap and for direct (i.e.
     562     *       not via safe read/write path) calls to iemMemMapJmp. */
     563    uint64_t            cTlbCoreHits;
     564    /** Safe read/write TLB hits in iemMemMapJmp (IEM_WITH_TLB_STATISTICS
     565     *  only; data tlb only). */
     566    uint64_t            cTlbSafeHits;
     567    /** TLB hits in IEMAllMemRWTmplInline.cpp.h (data + IEM_WITH_TLB_STATISTICS only). */
     568    uint64_t            cTlbInlineCodeHits;
     569
     570    /** TLB misses in IEMAll.cpp code (both).
     571     * @note For the data TLB this is only used in iemMemMap and for direct (i.e.
     572     *       not via safe read/write path) calls to iemMemMapJmp. So,
     573     *       for the data TLB this more like 'other misses', while for the code
     574     *       TLB is all misses. */
     575    uint64_t            cTlbCoreMisses;
     576    /** Safe read/write TLB misses in iemMemMapJmp (so data only). */
     577    uint64_t            cTlbSafeMisses;
     578    /** Safe read path taken (data only).  */
     579    uint64_t            cTlbSafeReadPath;
     580    /** Safe write path taken (data only).  */
     581    uint64_t            cTlbSafeWritePath;
     582
     583    /** @name Details for native code TLB misses.
     584     * @note These counts are included in the above counters (cTlbSafeReadPath,
     585     *       cTlbSafeWritePath, cTlbInlineCodeHits).
     586     * @{ */
     587    /** TLB misses in native code due to tag mismatch.   */
     588    STAMCOUNTER         cTlbNativeMissTag;
     589    /** TLB misses in native code due to flags or physical revision mismatch. */
     590    STAMCOUNTER         cTlbNativeMissFlagsAndPhysRev;
     591    /** TLB misses in native code due to misaligned access. */
     592    STAMCOUNTER         cTlbNativeMissAlignment;
     593    /** TLB misses in native code due to cross page access. */
     594    uint32_t            cTlbNativeMissCrossPage;
     595    /** TLB misses in native code due to non-canonical address. */
     596    uint32_t            cTlbNativeMissNonCanonical;
     597    /** @} */
     598
     599    /** Slow read path (code only).  */
     600    uint32_t            cTlbSlowCodeReadPath;
     601
    578602    /** Alignment padding. */
    579     uint32_t            au32Padding[6];
     603    uint32_t            au32Padding[5];
    580604
    581605    /** The TLB entries. */
  • trunk/src/VBox/VMM/include/IEMN8veRecompilerTlbLookup.h

    r104947 r104956  
    545545        /* test regflat, fAlignMask */
    546546        off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, idxRegFlatPtr, fAlignMask);
     547#ifndef IEM_WITH_TLB_STATISTICS
    547548        /* jnz tlbmiss */
    548549        off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
     550#else
     551        /* jz  1F; inc stat; jmp tlbmiss */
     552        uint32_t const offFixup1 = off;
     553        off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e);
     554        off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
     555                                                  offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissAlignment));
     556        off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
     557        iemNativeFixupFixedJump(pReNative, offFixup1, off);
     558#endif
    549559    }
    550560
     
    559569        /* cmp reg1, GUEST_PAGE_SIZE - cbMem */
    560570        off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, GUEST_PAGE_SIZE);
     571#ifndef IEM_WITH_TLB_STATISTICS
    561572        /* ja  tlbmiss */
    562573        off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
     574#else
     575        /* jbe 1F; inc stat; jmp tlbmiss */
     576        uint32_t const offFixup1 = off;
     577        off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_be);
     578        off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
     579                                                 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissCrossPage));
     580        off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
     581        iemNativeFixupFixedJump(pReNative, offFixup1, off);
     582#endif
    563583    }
    564584
     
    590610        pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, pTlbState->idxReg1 & 7);
    591611        pCodeBuf[off++] = 1;
     612#  ifndef IEM_WITH_TLB_STATISTICS
    592613        /* ja  tlbmiss */
    593614        off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
     615#  else
     616        /* jbe 1F; inc stat; jmp tlbmiss */
     617        uint32_t const offFixup1 = off;
     618        off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_be);
     619        off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
     620                                                 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissNonCanonical));
     621        off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
     622        iemNativeFixupFixedJump(pReNative, offFixup1, off);
     623#  endif
    594624        /* shr reg1, 16 + GUEST_PAGE_SHIFT */
    595625        off = iemNativeEmitShiftGprRightEx(pCodeBuf, off, pTlbState->idxReg1, 16 + GUEST_PAGE_SHIFT);
     
    603633        Assert(Armv8A64ConvertImmRImmS2Mask32(14, 31) == 0xfffe);
    604634        pCodeBuf[off++] = Armv8A64MkInstrTstImm(pTlbState->idxReg1, 14, 31, false /*f64Bit*/);
    605         /* b.nq tlbmiss */
     635#  ifndef IEM_WITH_TLB_STATISTICS
     636        /* b.ne tlbmiss */
    606637        off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
     638#  else
     639        /* b.eq 1F; inc stat; jmp tlbmiss */
     640        uint32_t const offFixup1 = off;
     641        off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e);
     642        off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
     643                                                 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissNonCanonical));
     644        off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
     645        iemNativeFixupFixedJump(pReNative, offFixup1, off);
     646#  endif
    607647
    608648        /* ubfx reg1, regflat, #12, #36 */
     
    720760#  error "Port me"
    721761# endif
     762# ifndef IEM_WITH_TLB_STATISTICS
    722763    /* jne tlbmiss */
    723764    off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
     765# else
     766    /* je  1F; inc stat; jmp tlbmiss */
     767    uint32_t const offFixup1 = off;
     768    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e);
     769    off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
     770                                              offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissTag));
     771    off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
     772    iemNativeFixupFixedJump(pReNative, offFixup1, off);
     773# endif
    724774
    725775    /*
     
    764814#  error "Port me"
    765815# endif
     816# ifndef IEM_WITH_TLB_STATISTICS
    766817    /* jne tlbmiss */
    767818    off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
     819# else
     820    /* je  2F; inc stat; jmp tlbmiss */
     821    uint32_t const offFixup2 = off;
     822    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e);
     823    off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
     824                                              offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissFlagsAndPhysRev));
     825    off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
     826    iemNativeFixupFixedJump(pReNative, offFixup2, off);
     827# endif
    768828
    769829    /*
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette