- Timestamp:
- Jul 12, 2024 12:14:36 AM (5 months ago)
- Location:
- trunk/src/VBox/VMM
- Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r105250 r105284 222 222 RTGCPTR uTagNoRev = (a_uValue); \ 223 223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \ 224 /** @todo do large page accounting */ \ 224 225 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \ 225 226 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \ … … 638 639 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) 639 640 /** 641 * Helper for doing large page accounting at TLB load time. 642 */ 643 template<bool const a_fGlobal> 644 DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages) 645 { 646 if (a_fGlobal) 647 pTlb->cTlbGlobalLargePageCurLoads++; 648 else 649 pTlb->cTlbNonGlobalLargePageCurLoads++; 650 651 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U); 652 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT; 653 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal 654 ? &pTlb->GlobalLargePageRange 655 : &pTlb->NonGlobalLargePageRange; 656 uTagNoRev &= ~(RTGCPTR)fMask; 657 if (uTagNoRev < pRange->uFirstTag) 658 pRange->uFirstTag = uTagNoRev; 659 660 uTagNoRev |= fMask; 661 if (uTagNoRev > pRange->uLastTag) 662 pRange->uLastTag = uTagNoRev; 663 } 664 #endif 665 666 667 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) 668 /** 640 669 * Worker for iemTlbInvalidateAll. 
641 670 */ … … 659 688 pTlb->aEntries[i * 2].uTag = 0; 660 689 } 690 691 pTlb->cTlbNonGlobalLargePageCurLoads = 0; 692 pTlb->NonGlobalLargePageRange.uLastTag = 0; 693 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX; 694 661 695 if (a_fGlobal) 662 696 { … … 672 706 pTlb->aEntries[i * 2 + 1].uTag = 0; 673 707 } 708 709 pTlb->cTlbGlobalLargePageCurLoads = 0; 710 pTlb->GlobalLargePageRange.uLastTag = 0; 711 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX; 674 712 } 675 713 } … … 729 767 730 768 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) 731 template<bool a_fDataTlb> 769 770 template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal> 771 DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag) 772 { 773 /* Combine TAG values with the TLB revisions. */ 774 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0; 775 if (a_fNonGlobal) 776 GCPtrTag |= pTlb->uTlbRevision; 777 778 /* Set up the scan. */ 779 bool const fPartialScan = IEMTLB_ENTRY_COUNT >= (a_f2MbLargePage ? 512 : 1024); 780 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0; 781 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + (a_f2MbLargePage ? 512 : 1024) : IEMTLB_ENTRY_COUNT; 782 RTGCPTR const GCPtrTagMask = fPartialScan 783 ? ~(RTGCPTR)0 784 : ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK 785 & ~(RTGCPTR)( ( RT_BIT_64((a_f2MbLargePage ? 9 : 10) - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) 786 - 1U) 787 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO); 788 789 /* 790 * Do the scanning. 
791 */ 792 for (idxEven = 0; idxEven < idxEvenEnd; idxEven += 2) 793 { 794 if (a_fNonGlobal) 795 { 796 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag) 797 { 798 if (pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) 799 { 800 pTlb->aEntries[idxEven].uTag = 0; 801 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag) 802 pVCpu->iem.s.cbInstrBufTotal = 0; 803 } 804 } 805 GCPtrTag++; 806 } 807 808 if (a_fGlobal) 809 { 810 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob) 811 { 812 if (pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) 813 { 814 pTlb->aEntries[idxEven + 1].uTag = 0; 815 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag) 816 pVCpu->iem.s.cbInstrBufTotal = 0; 817 } 818 } 819 GCPtrTagGlob++; 820 } 821 } 822 823 } 824 825 template<bool const a_fDataTlb, bool const a_f2MbLargePage> 826 DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag) 827 { 828 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U); 829 830 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U); 831 if ( pTlb->GlobalLargePageRange.uFirstTag >= GCPtrTag 832 && pTlb->GlobalLargePageRange.uLastTag <= GCPtrTag) 833 { 834 if ( pTlb->NonGlobalLargePageRange.uFirstTag < GCPtrTag 835 || pTlb->NonGlobalLargePageRange.uLastTag > GCPtrTag) 836 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag); 837 else 838 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag); 839 } 840 else if ( pTlb->NonGlobalLargePageRange.uFirstTag < GCPtrTag 841 || pTlb->NonGlobalLargePageRange.uLastTag > GCPtrTag) 842 { /* Large pages aren't as likely in the non-global TLB half. 
*/ } 843 else 844 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag); 845 } 846 847 template<bool const a_fDataTlb> 732 848 DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven) 733 849 { … … 761 877 * ASSUMES that tag calculation is a right shift by GUEST_PAGE_SHIFT. 762 878 */ 763 if (fMaybeLargePage) 764 { 765 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U); 766 RTGCPTR const GCPtrInstrBufPcTag = IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc); 767 if ((pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE) && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)) 768 { 769 /* 2MB large page */ 770 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64(21 - GUEST_PAGE_SHIFT) - 1U); 771 RTGCPTR GCPtrTagGlob = GCPtrTag | pTlb->uTlbRevisionGlobal; 772 GCPtrTag |= pTlb->uTlbRevision; 773 774 # if IEMTLB_ENTRY_COUNT >= 512 775 idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag); 776 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)0; 777 uintptr_t const idxEvenEnd = idxEven + 512; 879 if ( fMaybeLargePage 880 # if 0 /** @todo do accurate counts or currently loaded large stuff and we can use those */ 881 && (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)) 778 882 # else 779 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK 780 & ~(RTGCPTR)( (RT_BIT_64(9 - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U) 781 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO); 782 uintptr_t const idxEvenEnd = IEMTLB_ENTRY_COUNT; 883 && (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)) 783 884 # endif 784 for (idxEven = 0; idxEven < idxEvenEnd; idxEven += 2) 785 { 786 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag) 787 { 788 if (pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) 789 { 790 pTlb->aEntries[idxEven].uTag = 0; 791 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag) 792 pVCpu->iem.s.cbInstrBufTotal = 
0; 793 fMaybeLargePage = true; 794 } 795 else 796 { 797 Assert(fMaybeLargePage == -1); 798 break; 799 } 800 } 801 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob) 802 { 803 if (pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) 804 { 805 pTlb->aEntries[idxEven + 1].uTag = 0; 806 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag) 807 pVCpu->iem.s.cbInstrBufTotal = 0; 808 fMaybeLargePage = true; 809 } 810 else 811 { 812 Assert(fMaybeLargePage == -1); 813 break; 814 } 815 } 816 GCPtrTag++; 817 GCPtrTagGlob++; 818 } 819 } 885 { 886 RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc); 887 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE) 888 iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag); 820 889 else 821 { 822 /* 4MB large page */ 823 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64(22 - GUEST_PAGE_SHIFT) - 1U); 824 RTGCPTR GCPtrTagGlob = GCPtrTag | pTlb->uTlbRevisionGlobal; 825 GCPtrTag |= pTlb->uTlbRevision; 826 827 # if IEMTLB_ENTRY_COUNT >= 1024 828 idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag); 829 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)0; 830 uintptr_t const idxEvenEnd = idxEven + 1024; 831 # else 832 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK 833 & ~(RTGCPTR)( (RT_BIT_64(10 - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U) 834 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO); 835 uintptr_t const idxEvenEnd = IEMTLB_ENTRY_COUNT; 836 # endif 837 for (idxEven = 0; idxEven < idxEvenEnd; idxEven += 2) 838 { 839 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag) 840 { 841 if (pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) 842 { 843 pTlb->aEntries[idxEven].uTag = 0; 844 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag) 845 pVCpu->iem.s.cbInstrBufTotal = 0; 846 fMaybeLargePage = true; 847 } 848 else 849 { 850 Assert(fMaybeLargePage == -1); 851 break; 852 } 853 } 854 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == 
GCPtrTagGlob) 855 { 856 if (pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) 857 { 858 pTlb->aEntries[idxEven + 1].uTag = 0; 859 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag) 860 pVCpu->iem.s.cbInstrBufTotal = 0; 861 fMaybeLargePage = true; 862 } 863 else 864 { 865 Assert(fMaybeLargePage == -1); 866 break; 867 } 868 } 869 GCPtrTag++; 870 GCPtrTagGlob++; 871 } 872 } 873 } 874 } 875 #endif 876 890 iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag); 891 } 892 } 893 894 #endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */ 877 895 878 896 /** … … 1229 1247 pTlbe--; 1230 1248 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision; 1249 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE) 1250 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)); 1231 1251 } 1232 1252 else … … 1234 1254 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++; 1235 1255 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal; 1256 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE) 1257 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)); 1236 1258 } 1237 1259 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A)) … … 6773 6795 pTlbe--; 6774 6796 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision; 6797 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE) 6798 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)); 6775 6799 } 6776 6800 else … … 6778 6802 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++; 6779 6803 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal; 6804 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE) 6805 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)); 6780 6806 } 6781 6807 } … … 7165 7191 pTlbe--; 7166 7192 pTlbe->uTag = 
uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision; 7193 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE) 7194 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)); 7167 7195 } 7168 7196 else … … 7173 7201 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++; 7174 7202 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal; 7203 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE) 7204 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)); 7175 7205 } 7176 7206 } -
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
r105261 r105284 206 206 #endif 207 207 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev; 208 #ifndef VBOX_VMM_TARGET_ARMV8 209 pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX; 210 pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX; 211 pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX; 212 pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX; 213 #endif 208 214 209 215 /* … … 343 349 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionRollovers", idCpu); 344 350 351 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 352 "Code TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageGlobalCurLoads", idCpu); 353 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 354 "Code TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalFirstTag", idCpu); 355 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 356 "Code TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalLastTag", idCpu); 357 358 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 359 "Code TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalCurLoads", idCpu); 360 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 361 "Code TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalFirstTag", idCpu); 362 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 363 "Code TLB non-global large page range: last tag", 
"/IEM/CPU%u/Tlb/Code/LargePageNonGlobalLastTag", idCpu); 364 345 365 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 346 366 "Code TLB misses", "/IEM/CPU%u/Tlb/Code/Misses", idCpu); … … 416 436 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, 417 437 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionRollovers", idCpu); 438 439 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 440 "Data TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageGlobalCurLoads", idCpu); 441 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 442 "Data TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalFirstTag", idCpu); 443 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 444 "Data TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalLastTag", idCpu); 445 446 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 447 "Data TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalCurLoads", idCpu); 448 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 449 "Data TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalFirstTag", idCpu); 450 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 451 "Data TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalLastTag", idCpu); 418 452 419 453 STAMR3RegisterF(pVM, 
&pVCpu->iem.s.DataTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, -
trunk/src/VBox/VMM/include/IEMInternal.h
r105283 r105284 571 571 uint64_t uTlbRevisionGlobal; 572 572 573 /** Large page tag range. 574 * 575 * This is used to avoid scanning a large page's worth of TLB entries for each 576 * INVLPG instruction, and only to do so iff we've loaded any and when the 577 * address is in this range. This is kept up to date when we load new TLB 578 * entries. 579 */ 580 struct LARGEPAGERANGE 581 { 582 /** The lowest large page address tag, UINT64_MAX if none. */ 583 uint64_t uFirstTag; 584 /** The highest large page address tag (with offset mask part set), 0 if none. */ 585 uint64_t uLastTag; 586 } 587 /** Large page range for non-global pages. */ 588 NonGlobalLargePageRange, 589 /** Large page range for global pages. */ 590 GlobalLargePageRange; 591 /** Number of non-global entries for large pages loaded since last TLB flush. */ 592 uint32_t cTlbNonGlobalLargePageCurLoads; 593 /** Number of global entries for large pages loaded since last TLB flush. */ 594 uint32_t cTlbGlobalLargePageCurLoads; 595 573 596 /* Statistics: */ 574 597 … … 630 653 uint32_t cTlbPhysRevRollovers; 631 654 632 uint32_t au32Padding[10];655 /*uint32_t au32Padding[2];*/ 633 656 634 657 /** The TLB entries.
Note:
See TracChangeset
for help on using the changeset viewer.