Changeset 104956 in vbox
- Timestamp: Jun 18, 2024 11:44:59 AM (9 months ago)
- svn:sync-xref-src-repo-rev: 163564
- Location: trunk/src/VBox/VMM
- Files: 10 edited
Legend (the flattened side-by-side diff tables are rendered below as unified hunks):
- Unmodified: lines shown with no prefix
- Added: lines prefixed with +
- Removed: lines prefixed with -
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r104941 → r104956)

@@ -936 +936 @@
 {
     /* likely when executing lots of code, otherwise unlikely */
-# ifdef VBOX_WITH_STATISTICS
-    pVCpu->iem.s.CodeTlb.cTlbHits++;
+# ifdef IEM_WITH_TLB_STATISTICS
+    pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
 # endif
     Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));

@@ -973 +973 @@
 else
 {
-    pVCpu->iem.s.CodeTlb.cTlbMisses++;
+    pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;

     /* This page table walking will set A bits as required by the access while performing the walk.

@@ -1105 +1105 @@
 else
 {
-    pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
+    pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;

     /* Check instruction length.

@@ -6342 +6342 @@
     && !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0))) )
 {
-    STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+# ifdef IEM_WITH_TLB_STATISTICS
+    pVCpu->iem.s.DataTlb.cTlbCoreHits++;
+#endif

     /* If the page is either supervisor only or non-writable, we need to do

@@ -6394 +6396 @@
 else
 {
-    pVCpu->iem.s.DataTlb.cTlbMisses++;
+    pVCpu->iem.s.DataTlb.cTlbCoreMisses++;

     /* This page table walking will set A bits as required by the access while performing the walk.

@@ -6618 +6620 @@
  *                      IEM_MEMMAP_F_ALIGN_GP_OR_AC.
  *                      Pass zero to skip alignment.
- */
-void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
-                   uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
+ * @tparam  a_fSafe     Whether this is a call from "safe" fallback function in
+ *                      IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
+ *                      needs counting as such in the statistics.
+ */
+template<bool a_fSafeCall = false>
+static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
+                          uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
 {
     STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);

@@ -6739 +6745 @@
 if (   pTlbe->uTag == uTag
     && !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY))) )
-    STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+{
+# ifdef IEM_WITH_TLB_STATISTICS
+    if (a_fSafeCall)
+        pVCpu->iem.s.DataTlb.cTlbSafeHits++;
+    else
+        pVCpu->iem.s.DataTlb.cTlbCoreHits++;
+# endif
+}
 else
 {
-    pVCpu->iem.s.DataTlb.cTlbMisses++;
+    if (a_fSafeCall)
+        pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
+    else
+        pVCpu->iem.s.DataTlb.cTlbCoreMisses++;

     /* This page table walking will set A and D bits as required by the

@@ -6941 +6957 @@
     *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
     return pvMem;
+}
+
+
+/** @see iemMemMapJmp */
+static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
+                              uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
+{
+    return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
 }
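The core trick of this changeset is visible in the iemMemMapJmp hunks above: the function becomes a template over a compile-time bool so one body can credit either the "core" or the "safe" counters, and iemMemMapSafeJmp is simply the <true> instantiation. A minimal stand-alone sketch of the pattern (hypothetical names, not VBox code):

    #include <cstdint>

    struct Tlb
    {
        uint64_t cTlbCoreHits = 0;  /* hits from the generic (core) mapping path */
        uint64_t cTlbSafeHits = 0;  /* hits from the "safe" fallback wrappers */
    };

    template<bool a_fSafeCall = false>
    static void tlbCountHit(Tlb &tlb)
    {
        if (a_fSafeCall)            /* constant at compile time; the dead branch is dropped */
            tlb.cTlbSafeHits++;
        else
            tlb.cTlbCoreHits++;
    }

    /* Thin wrapper mirroring iemMemMapSafeJmp: forwards to the <true> instantiation. */
    static void tlbCountHitSafe(Tlb &tlb)
    {
        tlbCountHit<true /*a_fSafeCall*/>(tlb);
    }

Because a_fSafeCall is a template parameter rather than a runtime argument, each instantiation compiles down to a single unconditional increment; the hot path pays nothing for the finer-grained bookkeeping.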
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h (r103908 → r104956)

Every mapping call in this template switches from iemMemMapJmp to the new iemMemMapSafeJmp wrapper, so accesses made via these safe fallback functions get counted as such. Two representative hunks:

@@ -84 +84 @@
 # endif
     uint8_t bUnmapInfo;
-    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
-                                                                    IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
+    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
+                                                                        IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     *pDst = *pSrc;
     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

@@ -205 +205 @@
     Log8(("IEM AT/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
     *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
-    return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
-                                         IEM_ACCESS_DATA_ATOMIC, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
+    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
+                                             IEM_ACCESS_DATA_ATOMIC, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
 }

The same substitution, keeping each site's access flags (IEM_ACCESS_DATA_R/W/RW/ATOMIC, IEM_ACCESS_STACK_R/W) and alignment arguments, is applied to the second fetch variant (old line 100), the store (174), the read-write, write-only and read-only mapping helpers (231, 255, 279), the stack store and the word/dword push variants (498, 531, 543, 553), the stack fetch (574), and the push/pop helpers working on GCPtrTop (600, 627, 668).
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h (r102977 → r104956)

All inline TLB-hit paths in this template drop the generic STAM_STATS() wrapper in favour of the dedicated inline-code hit counter behind the new guard. The replacement is identical at every site:

@@ -127 +127 @@
      * Fetch and return the data.
      */
-    STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+# ifdef IEM_WITH_TLB_STATISTICS
+    pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
+# endif
     Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
     Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));

The same three-line replacement lands in the remaining fetch, store, map, push and pop paths at old lines 200, 276, 347, 421, 478, 537, 594, 651, 706, 761, 815, 884, 945, 1006, 1063, 1119, 1171, 1228 and 1288, and inside the iemMemStackPush*SRegJmp, iemMemFlat32StackPush* and iemMemStackPopGReg* workers at 1364, 1444, 1500, 1573, 1649 and 1705. The rest of the churn in those stack workers is confined to the nested preprocessor directives (# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE), # if TMPL_MEM_TYPE_SIZE > 1, # error "TMPL_MEM_TYPE_SIZE", # endif), whose indentation-only adjustments are not visible in this flattened capture.
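The point of trading STAM_STATS() for an explicit #ifdef is that the TLB counters now hang off their own IEM_WITH_TLB_STATISTICS switch (introduced in IEMInternal.h below) rather than the general VBOX_WITH_STATISTICS machinery. A compressed sketch of the idiom; the TLB_STAT_INC helper macro is hypothetical, the real code writes the #ifdef out at each site:

    #if defined(VBOX_WITH_STATISTICS) || defined(DOXYGEN_RUNNING)
    # define IEM_WITH_TLB_STATISTICS
    #endif

    #ifdef IEM_WITH_TLB_STATISTICS
    # define TLB_STAT_INC(a_Counter) do { (a_Counter)++; } while (0)
    #else
    # define TLB_STAT_INC(a_Counter) do { /* compiled out */ } while (0)
    #endif

    /* usage: TLB_STAT_INC(pVCpu->iem.s.DataTlb.cTlbInlineCodeHits); */

Keeping the guard separate means the hot-path TLB counting can later be enabled or disabled independently of the rest of the release-build statistics without touching every call site again.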
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp (r104877 → r104956)

The guards around the native code-TLB hit counters switch to the new define:

@@ -1306 +1306 @@
                                        idxLabelTlbLookup, idxLabelTlbMiss, idxRegGCPhys, offInstr);

-# ifdef VBOX_WITH_STATISTICS
+# ifdef IEM_WITH_TLB_STATISTICS
     off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
                                             RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeCodeTlbHitsForNewPageWithOffset));

@@ -1600 +1600 @@
                                        idxLabelTlbLookup, idxLabelTlbMiss, idxRegDummy);

-# ifdef VBOX_WITH_STATISTICS
+# ifdef IEM_WITH_TLB_STATISTICS
     off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
                                             RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeCodeTlbHitsForNewPage));
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h (r104797 → r104956)

The same one-line guard change (# ifdef VBOX_WITH_STATISTICS → # ifdef IEM_WITH_TLB_STATISTICS) is applied in front of each emitted TLB-hit counter increment:

@@ -1164 +1164 @@
      */
     PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 64);
-# ifdef VBOX_WITH_STATISTICS
+# ifdef IEM_WITH_TLB_STATISTICS
     off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, TlbState.idxReg1, TlbState.idxReg2,
                                               RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeTlbHitsForStack));

The identical guard change appears at lines 1849, 7567 and 7914 (further StatNativeTlbHitsForStack sites), at 6680 (the generic load/store path, where the counter is selected by an enmOp == kIemNativeEmitMemOp_Store ternary), and at 8499 (StatNativeTlbHitsForMapped, right after the iemNativeEmitTlbLookup<true> call).
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp (r104858 → r104956)

@@ -8609 +8609 @@
     ENTRY(iem.s.DataTlb.uTlbRevision),
     ENTRY(iem.s.DataTlb.uTlbPhysRev),
-    ENTRY(iem.s.DataTlb.cTlbHits),
+    ENTRY(iem.s.DataTlb.cTlbCoreHits),
+    ENTRY(iem.s.DataTlb.cTlbInlineCodeHits),
+    ENTRY(iem.s.DataTlb.cTlbNativeMissTag),
+    ENTRY(iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev),
+    ENTRY(iem.s.DataTlb.cTlbNativeMissAlignment),
+    ENTRY(iem.s.DataTlb.cTlbNativeMissCrossPage),
+    ENTRY(iem.s.DataTlb.cTlbNativeMissNonCanonical),
     ENTRY(iem.s.DataTlb.aEntries),
     ENTRY(iem.s.CodeTlb.uTlbRevision),
     ENTRY(iem.s.CodeTlb.uTlbPhysRev),
-    ENTRY(iem.s.CodeTlb.cTlbHits),
+    ENTRY(iem.s.CodeTlb.cTlbCoreHits),
+    ENTRY(iem.s.CodeTlb.cTlbNativeMissTag),
+    ENTRY(iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev),
+    ENTRY(iem.s.CodeTlb.cTlbNativeMissAlignment),
+    ENTRY(iem.s.CodeTlb.cTlbNativeMissCrossPage),
+    ENTRY(iem.s.CodeTlb.cTlbNativeMissNonCanonical),
     ENTRY(iem.s.CodeTlb.aEntries),
     ENTRY(pVMR3),
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp (r104941 → r104956)

@@ -297 +297 @@
     char szPat[128];
     RT_NOREF_PV(szPat); /* lazy bird */
+    char szVal[128];
+    RT_NOREF_PV(szVal); /* lazy bird */

     STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,

@@ -319 +321 @@
                     "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);

-    STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbMisses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Code TLB misses",                      "/IEM/CPU%u/CodeTlb-Misses", idCpu);
+    /* Code TLB: */
     STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
-                    "Code TLB revision",                    "/IEM/CPU%u/CodeTlb-Revision", idCpu);
+                    "Code TLB revision",                    "/IEM/CPU%u/Tlb/Code/Revision", idCpu);
     STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
-                    "Code TLB physical revision",           "/IEM/CPU%u/CodeTlb-PhysRev", idCpu);
-    STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Code TLB slow read path",              "/IEM/CPU%u/CodeTlb-SlowReads", idCpu);
-
-    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbMisses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Data TLB misses",                      "/IEM/CPU%u/DataTlb-Misses", idCpu);
-    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Data TLB safe read path",              "/IEM/CPU%u/DataTlb-SafeReads", idCpu);
-    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Data TLB safe write path",             "/IEM/CPU%u/DataTlb-SafeWrites", idCpu);
+                    "Code TLB physical revision",           "/IEM/CPU%u/Tlb/Code/RevisionPhysical", idCpu);
+
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB misses",                      "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB slow read path",              "/IEM/CPU%u/Tlb/Code/SlowReads", idCpu);
+# ifdef IEM_WITH_TLB_STATISTICS
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB hits (non-native)",           "/IEM/CPU%u/Tlb/Code/Hits/Other", idCpu);
+#  if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
+    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB native hits on new page",     "/IEM/CPU%u/Tlb/Code/Hits/New-Page", idCpu);
+    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB native hits on new page /w offset", "/IEM/CPU%u/Tlb/Code/Hits/New-Page-With-Offset", idCpu);
+#  endif
+
+    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits/*", idCpu);
+    STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB hits",
+                      "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
+
+    RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
+    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
+    STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
+                           "Code TLB actual miss rate",     "/IEM/CPU%u/Tlb/Code/RateMisses", idCpu);
+
+#  if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
+                    "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Tag", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
+                    "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB misses in native code: Alignment [not directly included grand parent sum]",
+                    "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Alignment", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB misses in native code: Cross page [not directly included grand parent sum]",
+                    "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/CrossPage", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB misses in native code: Non-canonical [not directly included grand parent sum]",
+                    "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/NonCanonical", idCpu);
+
+    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB native misses on new page",
+                    "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page", idCpu);
+    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Code TLB native misses on new page w/ offset",
+                    "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page-With-Offset", idCpu);
+#  endif
+# endif /* IEM_WITH_TLB_STATISTICS */
+
+    /* Data TLB organized as best we can... */
     STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
-                    "Data TLB revision",                    "/IEM/CPU%u/DataTlb-Revision", idCpu);
+                    "Data TLB revision",                    "/IEM/CPU%u/Tlb/Data/Revision", idCpu);
     STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
-                    "Data TLB physical revision",           "/IEM/CPU%u/DataTlb-PhysRev", idCpu);
-
-# ifdef VBOX_WITH_STATISTICS
-    STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Code TLB hits",                        "/IEM/CPU%u/CodeTlb-Hits", idCpu);
-    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Data TLB hits",                        "/IEM/CPU%u/DataTlb-Hits-Other", idCpu);
+                    "Data TLB physical revision",           "/IEM/CPU%u/Tlb/Data/RevisionPhysical", idCpu);
+
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB core misses (iemMemMap, direct iemMemMapJmp (not safe path))",
+                    "/IEM/CPU%u/Tlb/Data/Misses/Core", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB safe read path (inline/native misses going to iemMemMapJmp)",
+                    "/IEM/CPU%u/Tlb/Data/Misses/Safe/Reads", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB safe write path (inline/native misses going to iemMemMapJmp)",
+                    "/IEM/CPU%u/Tlb/Data/Misses/Safe/Writes", idCpu);
+    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/*", idCpu);
+    STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB misses",
+                      "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
+
+    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/Safe/*", idCpu);
+    STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB actual safe path calls (read + write)",
+                      "/IEM/CPU%u/Tlb/Data/Misses/Safe", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB hits in iemMemMapJmp - not part of safe-path total",
+                    "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartHits", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB misses in iemMemMapJmp - not part of safe-path total",
+                    "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses", idCpu);
+
+# ifdef IEM_WITH_TLB_STATISTICS
+#  ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
+                    "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Tag", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
+                    "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB misses in native code: Alignment [not directly included grand parent sum]",
+                    "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Alignment", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB misses in native code: Cross page [not directly included grand parent sum]",
+                    "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/CrossPage", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB misses in native code: Non-canonical [not directly included grand parent sum]",
+                    "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/NonCanonical", idCpu);
+#  endif
+# endif
+
+# ifdef IEM_WITH_TLB_STATISTICS
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB core hits (iemMemMap, direct iemMemMapJmp (not safe path))",
+                    "/IEM/CPU%u/Tlb/Data/Hits/Core", idCpu);
+    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInlineCodeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                    "Data TLB hits in IEMAllMemRWTmplInline.cpp.h",
+                    "/IEM/CPU%u/Tlb/Data/Hits/Inline", idCpu);
 # ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
     STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Data TLB native stack access hits",    "/IEM/CPU%u/DataTlb-Hits-Native-Stack", idCpu);
+                    "Data TLB native stack access hits",
+                    "/IEM/CPU%u/Tlb/Data/Hits/Native/Stack", idCpu);
     STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Data TLB native data fetch hits",      "/IEM/CPU%u/DataTlb-Hits-Native-Fetch", idCpu);
+                    "Data TLB native data fetch hits",
+                    "/IEM/CPU%u/Tlb/Data/Hits/Native/Fetch", idCpu);
     STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Data TLB native data store hits",      "/IEM/CPU%u/DataTlb-Hits-Native-Store", idCpu);
+                    "Data TLB native data store hits",
+                    "/IEM/CPU%u/Tlb/Data/Hits/Native/Store", idCpu);
     STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Data TLB native mapped data hits",     "/IEM/CPU%u/DataTlb-Hits-Native-Mapped", idCpu);
+                    "Data TLB native mapped data hits",
+                    "/IEM/CPU%u/Tlb/Data/Hits/Native/Mapped", idCpu);
 # endif
-    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Hits-*", idCpu);
-    STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
-                      "Data TLB hits total",                "/IEM/CPU%u/DataTlb-Hits", idCpu);
-
-    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Safe*", idCpu);
-    STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
-                      "Data TLB actual misses",             "/IEM/CPU%u/DataTlb-SafeTotal", idCpu);
-    char szVal[128];
-    RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/DataTlb-SafeTotal", idCpu);
-    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Hits-*", idCpu);
+    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/*", idCpu);
+    STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits",
+                      "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
+
+# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
+    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/Native/*", idCpu);
+    STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits from native code",
+                      "/IEM/CPU%u/Tlb/Data/Hits/Native", idCpu);
+# endif
+
+    RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Hits|/IEM/CPU%u/Tlb/Data/Misses", idCpu, idCpu);
+    STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB lookups (sum of hits and misses)",
+                      "/IEM/CPU%u/Tlb/Data/Hits/AllLookups", idCpu);
+
+    RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
+    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
     STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
-                           "Data TLB actual miss rate",     "/IEM/CPU%u/DataTlb-SafeRate", idCpu);
-
-# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
-    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Code TLB native misses on new page",   "/IEM/CPU%u/CodeTlb-Misses-New-Page", idCpu);
-    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Code TLB native misses on new page w/ offset", "/IEM/CPU%u/CodeTlb-Misses-New-Page-With-Offset", idCpu);
-    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Code TLB native hits on new page",     "/IEM/CPU%u/CodeTlb-Hits-New-Page", idCpu);
-    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                    "Code TLB native hits on new page /w offset", "/IEM/CPU%u/CodeTlb-Hits-New-Page-With-Offset", idCpu);
-# endif
-# endif
+                           "Data TLB actual miss rate",     "/IEM/CPU%u/Tlb/Data/RateMisses", idCpu);
+
+# endif /* IEM_WITH_TLB_STATISTICS */
+

 #ifdef VBOX_WITH_IEM_RECOMPILER
trunk/src/VBox/VMM/include/IEMInternal-armv8.h (r100966 → r104956)

@@ -336 +336 @@
     uint32_t cTlbMisses;
     /** Slow read path. */
-    uint32_t cTlbSlowReadPath;
+    uint32_t cTlbSlowCodeReadPath;
 #if 0
     /** TLB misses because of tag mismatch. */
trunk/src/VBox/VMM/include/IEMInternal.h (r104947 → r104956)

@@ -111 +111 @@
  * Delay the writeback or dirty registers as long as possible. */
 # define IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
 #endif
+
+/** @def IEM_WITH_TLB_STATISTICS
+ * Enables all TLB statistics. */
+#if defined(VBOX_WITH_STATISTICS) || defined(DOXYGEN_RUNNING)
+# define IEM_WITH_TLB_STATISTICS
+#endif

@@ -552 +558 @@ (IEMTLB statistics members)
     /* Statistics: */

-    /** TLB hits (VBOX_WITH_STATISTICS only). */
-    uint64_t cTlbHits;
-    /** TLB misses. */
-    uint32_t cTlbMisses;
-    /** Slow read path. */
-    uint32_t cTlbSlowReadPath;
-    /** Safe read path. */
-    uint32_t cTlbSafeReadPath;
-    /** Safe write path. */
-    uint32_t cTlbSafeWritePath;
-#if 0
-    /** TLB misses because of tag mismatch. */
-    uint32_t cTlbMissesTag;
-    /** TLB misses because of virtual access violation. */
-    uint32_t cTlbMissesVirtAccess;
-    /** TLB misses because of dirty bit. */
-    uint32_t cTlbMissesDirty;
-    /** TLB misses because of MMIO */
-    uint32_t cTlbMissesMmio;
-    /** TLB misses because of write access handlers. */
-    uint32_t cTlbMissesWriteHandler;
-    /** TLB misses because no r3(/r0) mapping. */
-    uint32_t cTlbMissesMapping;
-#endif
+    /** TLB hits in IEMAll.cpp code (IEM_WITH_TLB_STATISTICS only; both).
+     * @note For the data TLB this is only used in iemMemMap and and for direct (i.e.
+     *       not via safe read/write path) calls to iemMemMapJmp. */
+    uint64_t cTlbCoreHits;
+    /** Safe read/write TLB hits in iemMemMapJmp (IEM_WITH_TLB_STATISTICS
+     *  only; data tlb only). */
+    uint64_t cTlbSafeHits;
+    /** TLB hits in IEMAllMemRWTmplInline.cpp.h (data + IEM_WITH_TLB_STATISTICS only). */
+    uint64_t cTlbInlineCodeHits;
+
+    /** TLB misses in IEMAll.cpp code (both).
+     * @note For the data TLB this is only used in iemMemMap and for direct (i.e.
+     *       not via safe read/write path) calls to iemMemMapJmp. So,
+     *       for the data TLB this more like 'other misses', while for the code
+     *       TLB is all misses. */
+    uint64_t cTlbCoreMisses;
+    /** Safe read/write TLB misses in iemMemMapJmp (so data only). */
+    uint64_t cTlbSafeMisses;
+    /** Safe read path taken (data only). */
+    uint64_t cTlbSafeReadPath;
+    /** Safe write path taken (data only). */
+    uint64_t cTlbSafeWritePath;
+
+    /** @name Details for native code TLB misses.
+     * @note These counts are included in the above counters (cTlbSafeReadPath,
+     *       cTlbSafeWritePath, cTlbInlineCodeHits).
+     * @{ */
+    /** TLB misses in native code due to tag mismatch. */
+    STAMCOUNTER cTlbNativeMissTag;
+    /** TLB misses in native code due to flags or physical revision mismatch. */
+    STAMCOUNTER cTlbNativeMissFlagsAndPhysRev;
+    /** TLB misses in native code due to misaligned access. */
+    STAMCOUNTER cTlbNativeMissAlignment;
+    /** TLB misses in native code due to cross page access. */
+    uint32_t cTlbNativeMissCrossPage;
+    /** TLB misses in native code due to non-canonical address. */
+    uint32_t cTlbNativeMissNonCanonical;
+    /** @} */
+
+    /** Slow read path (code only). */
+    uint32_t cTlbSlowCodeReadPath;
+
     /** Alignment padding. */
-    uint32_t au32Padding[6];
+    uint32_t au32Padding[5];

     /** The TLB entries. */
trunk/src/VBox/VMM/include/IEMN8veRecompilerTlbLookup.h (r104947 → r104956)

When IEM_WITH_TLB_STATISTICS is defined, each guard in the emitted TLB lookup no longer jumps straight to the shared miss label; the condition is inverted into a short forward skip so a per-cause miss counter can be bumped first.

@@ -545 +545 @@ (alignment check)
     /* test regflat, fAlignMask */
     off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, idxRegFlatPtr, fAlignMask);
+#ifndef IEM_WITH_TLB_STATISTICS
     /* jnz tlbmiss */
     off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
+#else
+    /* jz 1F; inc stat; jmp tlbmiss */
+    uint32_t const offFixup1 = off;
+    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e);
+    off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
+                                              offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissAlignment));
+    off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
+    iemNativeFixupFixedJump(pReNative, offFixup1, off);
+#endif
 }

@@ -559 +569 @@ (cross-page check)
     /* cmp reg1, GUEST_PAGE_SIZE - cbMem */
     off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, GUEST_PAGE_SIZE);
+#ifndef IEM_WITH_TLB_STATISTICS
     /* ja tlbmiss */
     off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
+#else
+    /* jbe 1F; inc stat; jmp tlbmiss */
+    uint32_t const offFixup1 = off;
+    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_be);
+    off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
+                                             offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissCrossPage));
+    off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
+    iemNativeFixupFixedJump(pReNative, offFixup1, off);
+#endif
 }

@@ -590 +610 @@ (x86 host: non-canonical address check)
     pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, pTlbState->idxReg1 & 7);
     pCodeBuf[off++] = 1;
+# ifndef IEM_WITH_TLB_STATISTICS
     /* ja tlbmiss */
     off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
+# else
+    /* jbe 1F; inc stat; jmp tlbmiss */
+    uint32_t const offFixup1 = off;
+    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_be);
+    off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
+                                             offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissNonCanonical));
+    off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
+    iemNativeFixupFixedJump(pReNative, offFixup1, off);
+# endif
     /* shr reg1, 16 + GUEST_PAGE_SHIFT */
     off = iemNativeEmitShiftGprRightEx(pCodeBuf, off, pTlbState->idxReg1, 16 + GUEST_PAGE_SHIFT);

@@ -603 +633 @@ (ARM64 host: non-canonical address check)
     Assert(Armv8A64ConvertImmRImmS2Mask32(14, 31) == 0xfffe);
     pCodeBuf[off++] = Armv8A64MkInstrTstImm(pTlbState->idxReg1, 14, 31, false /*f64Bit*/);
-    /* b.nq tlbmiss */
+# ifndef IEM_WITH_TLB_STATISTICS
+    /* b.ne tlbmiss */
     off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
+# else
+    /* b.eq 1F; inc stat; jmp tlbmiss */
+    uint32_t const offFixup1 = off;
+    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e);
+    off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
+                                             offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissNonCanonical));
+    off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
+    iemNativeFixupFixedJump(pReNative, offFixup1, off);
+# endif

     /* ubfx reg1, regflat, #12, #36 */

@@ -720 +760 @@ (TLB tag compare)
 # error "Port me"
 # endif
+# ifndef IEM_WITH_TLB_STATISTICS
     /* jne tlbmiss */
     off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
+# else
+    /* je 1F; inc stat; jmp tlbmiss */
+    uint32_t const offFixup1 = off;
+    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e);
+    off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
+                                              offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissTag));
+    off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
+    iemNativeFixupFixedJump(pReNative, offFixup1, off);
+# endif

@@ -764 +814 @@ (flags and physical revision compare)
 # error "Port me"
 # endif
+# ifndef IEM_WITH_TLB_STATISTICS
     /* jne tlbmiss */
     off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
+# else
+    /* je 2F; inc stat; jmp tlbmiss */
+    uint32_t const offFixup2 = off;
+    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e);
+    off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
+                                              offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissFlagsAndPhysRev));
+    off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
+    iemNativeFixupFixedJump(pReNative, offFixup2, off);
+# endif
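The pattern is the same for every guard: the non-statistics build emits one conditional jump straight to the shared tlbmiss label, while the statistics build inverts the condition into a short forward skip so the fall-through path can attribute the miss to a cause counter before jumping. A plain C++ rendering of the two shapes (hypothetical names, sketch only):

    #include <cstdint>

    struct TlbMissStats
    {
        uint32_t cTlbNativeMissAlignment = 0;   /* one counter per miss cause */
    };

    /* Statistics shape: returns true on the hit path, false when control
       would go to the shared 'tlbmiss' label. Without statistics the whole
       function collapses to 'return (uFlatPtr & fAlignMask) == 0;', i.e. a
       single conditional branch straight to tlbmiss. */
    static bool tlbAlignmentGuard(TlbMissStats &stats, uintptr_t uFlatPtr, uintptr_t fAlignMask)
    {
        if ((uFlatPtr & fAlignMask) == 0)       /* jz 1F - skip the counting code */
            return true;                        /* 1F: continue on the hit path   */
        stats.cTlbNativeMissAlignment++;        /* inc stat                        */
        return false;                           /* jmp tlbmiss                     */
    }

Note that the off + 16 passed to iemNativeEmitJccToFixedEx is only a provisional jump target: the emitter records offFixup1/offFixup2 and iemNativeFixupFixedJump rewrites the displacement once the real skip distance is known, which keeps the counting code entirely out of the taken (hit) path.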