- Timestamp: Jul 23, 2024 10:50:17 AM (6 months ago)
- Location: trunk/src/VBox/VMM
- Files: 6 edited
Legend:
- Unmodified (no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r105291 → r105440

@@ -727,8 +727,16 @@
     pVCpu->iem.s.cbInstrBufTotal = 0;
     iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
+    if (a_fGlobal)
+        IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
+    else
+        IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
 # endif
 
 # ifdef IEM_WITH_DATA_TLB
     iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
+    if (a_fGlobal)
+        IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
+    else
+        IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
 # endif
 #else
@@ -895,4 +903,5 @@
 VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
 {
+    IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
     Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
@@ -1240,4 +1249,5 @@
             if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
+            IEMTLBTRACE_LOAD(pVCpu, GCPtrFirst, false);
         }
         else
@@ -1247,4 +1257,5 @@
             if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
+            IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, false);
         }
         pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
@@ -6788,4 +6799,5 @@
             if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
+            IEMTLBTRACE_LOAD(pVCpu, GCPtrMem, true);
         }
         else
@@ -6795,4 +6807,5 @@
             if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
+            IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, true);
         }
     }
@@ -7184,4 +7197,5 @@
             if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
+            IEMTLBTRACE_LOAD(pVCpu, GCPtrMem, true);
         }
         else
@@ -7194,4 +7208,5 @@
             if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
+            IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, true);
         }
     }
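The IEMTLBTRACE_FLUSH/FLUSH_GLOBAL, IEMTLBTRACE_INVLPG and IEMTLBTRACE_LOAD/LOAD_GLOBAL statements added above are the new trace points. As the IEMInternal.h hunks further down show, they only expand to something in ring-3 builds with IEM_WITH_TLB_TRACE defined and compile to empty do/while statements otherwise. Roughly, the global code-TLB flush trace point above expands to:

    iemTlbTrace(pVCpu, kIemTlbTraceType_FlushGlobal,
                pVCpu->iem.s.CodeTlb.uTlbRevision,        /* u64Param  */
                pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal,  /* u64Param2 */
                false /* bParam: false = code TLB, true = data TLB */);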
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
r105356 → r105440

@@ -5989,4 +5989,6 @@
     }
 
+    IEMTLBTRACE_LOAD_CR0(pVCpu, uNewCrX, uOldCrX);
+
     /*
      * Inform PGM.
@@ -6110,4 +6112,6 @@
     }
 
+    IEMTLBTRACE_LOAD_CR3(pVCpu, uNewCrX, pVCpu->cpum.GstCtx.cr3);
+
     /* Inform PGM. */
     if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
@@ -6201,4 +6205,6 @@
         }
     }
+
+    IEMTLBTRACE_LOAD_CR4(pVCpu, uNewCrX, uOldCrX);
 
     /*
@@ -6880,4 +6886,5 @@
      * Fetch the invpcid descriptor from guest memory.
      */
+    /** @todo Check if the entire 128 bits are always read for all types. Check for invalid types as well. */
     RTUINT128U uDesc;
     VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvpcidDesc);
@@ -6914,4 +6921,6 @@
 
             /* Invalidate mappings for the linear address tagged with PCID except global translations. */
+            /** @todo PGMFlushTLB is overkill for X86_INVPCID_TYPE_INDV_ADDR. Add a fGlobal parameter
+             *        to PGMInvalidatePage or add a new function to support this variation of invlpg. */
             PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
             break;
@@ -7286,4 +7295,7 @@
     }
 #endif
+
+    if (idMsr == MSR_K6_EFER)
+        IEMTLBTRACE_LOAD_EFER(pVCpu, uValue.u, pVCpu->cpum.GstCtx.msrEFER);
 
     /*
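The CR0/CR3/CR4/EFER trace points added here record both the value being loaded and the value it replaces, which the new 'info tlbtrace' dumper prints as "load cr3 <new> (was <old>)" and so on. An illustrative output line (hypothetical values, format taken from the IEMR3.cpp hunk below):

    42: 00000000c0102847 load cr3 0000000102a04000 (was 0000000101ff3000)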
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
r105428 → r105440

@@ -50,4 +50,7 @@
 #include <iprt/assert.h>
 #include <iprt/getopt.h>
+#ifdef IEM_WITH_TLB_TRACE
+# include <iprt/mem.h>
+#endif
 #include <iprt/string.h>
 
@@ -64,4 +67,7 @@
 static FNDBGFINFOARGVINT iemR3InfoITlb;
 static FNDBGFINFOARGVINT iemR3InfoDTlb;
+#ifdef IEM_WITH_TLB_TRACE
+static FNDBGFINFOARGVINT iemR3InfoTlbTrace;
+#endif
 #if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
 static FNDBGFINFOARGVINT iemR3InfoTb;
@@ -203,5 +209,5 @@
     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
-        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+        PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
         AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do it's job w/o instruction stats) */
 
@@ -286,4 +292,15 @@
          */
        pVCpu->iem.s.uTbNativeRecompileAtUsedCount = uTbNativeRecompileAtUsedCount;
+#endif
+
+#ifdef IEM_WITH_TLB_TRACE
+        /*
+         * Allocate trace buffer.
+         */
+        pVCpu->iem.s.idxTlbTraceEntry      = 0;
+        pVCpu->iem.s.cTlbTraceEntriesShift = 19;//16;
+        pVCpu->iem.s.paTlbTraceEntries     = (PIEMTLBTRACEENTRY)RTMemPageAlloc(  RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift)
+                                                                                * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
+        AssertLogRelReturn(pVCpu->iem.s.paTlbTraceEntries, VERR_NO_PAGE_MEMORY);
 #endif
     }
@@ -958,4 +975,7 @@
     DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
     DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM instruction TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
+#ifdef IEM_WITH_TLB_TRACE
+    DBGFR3InfoRegisterInternalArgv(pVM, "tlbtrace", "IEM TLB trace log", iemR3InfoTlbTrace, DBGFINFO_FLAGS_RUN_ON_EMT);
+#endif
 #if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
     DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
@@ -972,4 +992,12 @@
 {
     NOREF(pVM);
+#ifdef IEM_WITH_TLB_TRACE
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+    {
+        PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
+        RTMemPageFree(pVCpu->iem.s.paTlbTraceEntries,
+                      RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift) * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
+    }
+#endif
     return VINF_SUCCESS;
 }
@@ -1162,6 +1190,6 @@
 #endif
 
-        pHlp->pfnPrintf(pHlp, "%0*x: %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s%s%s%s/%s%s%s%s/%s %s%s\n",
-                        RT_ELEMENTS(pTlb->aEntries) >= 0x1000 ? 4 : RT_ELEMENTS(pTlb->aEntries) >= 0x100 ? 3 : 2, uSlot,
+        pHlp->pfnPrintf(pHlp, IEMTLB_SLOT_FMT ": %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s%s%s%s/%s%s%s%s/%s %s%s\n",
+                        uSlot,
                         (pTlbe->uTag & IEMTLB_REVISION_MASK) == uTlbRevision ? "valid "
                         : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0          ? "empty "
@@ -1410,4 +1438,146 @@
 }
 
+
+#ifdef IEM_WITH_TLB_TRACE
+/**
+ * @callback_method_impl{FNDBGFINFOARGVINT, tlbtrace}
+ */
+static DECLCALLBACK(void) iemR3InfoTlbTrace(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
+{
+    /*
+     * Parse arguments.
+     */
+    static RTGETOPTDEF const s_aOptions[] =
+    {
+        { "--cpu",                  'c', RTGETOPT_REQ_UINT32  },
+        { "--vcpu",                 'c', RTGETOPT_REQ_UINT32  },
+        { "--last",                 'l', RTGETOPT_REQ_UINT32  },
+        { "--limit",                'l', RTGETOPT_REQ_UINT32  },
+        { "--stop-at-global-flush", 'g', RTGETOPT_REQ_NOTHING },
+    };
+
+    RTGETOPTSTATE State;
+    int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
+    AssertRCReturnVoid(rc);
+
+    uint32_t     cLimit             = UINT32_MAX;
+    bool         fStopAtGlobalFlush = false;
+    PVMCPU const pVCpuCall          = VMMGetCpu(pVM);
+    PVMCPU       pVCpu              = pVCpuCall;
+    if (!pVCpu)
+        pVCpu = VMMGetCpuById(pVM, 0);
+
+    RTGETOPTUNION ValueUnion;
+    while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
+    {
+        switch (rc)
+        {
+            case 'c':
+                if (ValueUnion.u32 >= pVM->cCpus)
+                    pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
+                else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
+                    pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
+                break;
+
+            case 'l':
+                cLimit = ValueUnion.u32;
+                break;
+
+            case 'g':
+                fStopAtGlobalFlush = true;
+                break;
+
+            case 'h':
+                pHlp->pfnPrintf(pHlp,
+                                "Usage: info tlbtrace [options]\n"
+                                "\n"
+                                "Options:\n"
+                                "  -c<n>, --cpu=<n>, --vcpu=<n>\n"
+                                "    Selects the CPU which TLB trace we're looking at. Default: Caller / 0\n"
+                                "  -l<n>, --last=<n>\n"
+                                "    Limit display to the last N entries. Default: all\n"
+                                "  -g,--stop-at-global-flush\n"
+                                "    Stop after the first global flush entry.\n"
+                                );
+                return;
+
+            default:
+                pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
+                return;
+        }
+    }
+
+    /*
+     * Get the details.
+     */
+    AssertReturnVoid(pVCpu);
+    Assert(pVCpu->iem.s.cTlbTraceEntriesShift <= 28);
+    uint32_t           idx       = pVCpu->iem.s.idxTlbTraceEntry;
+    uint32_t const     cShift    = RT_MIN(pVCpu->iem.s.cTlbTraceEntriesShift, 28);
+    uint32_t const     fMask     = RT_BIT_32(cShift) - 1;
+    uint32_t           cLeft     = RT_MIN(RT_MIN(idx, RT_BIT_32(cShift)), cLimit);
+    PCIEMTLBTRACEENTRY paEntries = pVCpu->iem.s.paTlbTraceEntries;
+    if (cLeft && paEntries)
+    {
+        /*
+         * Display the entries.
+         */
+        pHlp->pfnPrintf(pHlp, "TLB Trace for CPU %u:\n", pVCpu->idCpu);
+        while (cLeft-- > 0)
+        {
+            PCIEMTLBTRACEENTRY const pCur = &paEntries[--idx & fMask];
+            switch (pCur->enmType)
+            {
+                case kIemTlbTraceType_InvlPg:
+                    pHlp->pfnPrintf(pHlp, "%u: %016RX64 invlpg %RGv slot=" IEMTLB_SLOT_FMT "\n",
+                                    idx, pCur->rip, pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param));
+                    break;
+                case kIemTlbTraceType_Flush:
+                    pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64\n", idx, pCur->rip,
+                                    pCur->bParam ? "data" : "code", pCur->u64Param);
+                    break;
+                case kIemTlbTraceType_FlushGlobal:
+                    pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64 grev=%#RX64\n", idx, pCur->rip,
+                                    pCur->bParam ? "data" : "code", pCur->u64Param, pCur->u64Param2);
+                    if (fStopAtGlobalFlush)
+                        return;
+                    break;
+                case kIemTlbTraceType_Load:
+                    pHlp->pfnPrintf(pHlp, "%u: %016RX64 load %s %RGv slot=" IEMTLB_SLOT_FMT "\n",
+                                    idx, pCur->rip, pCur->bParam ? "data" : "code",
+                                    pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param));
+                    break;
+                case kIemTlbTraceType_LoadGlobal:
+                    pHlp->pfnPrintf(pHlp, "%u: %016RX64 load %s %RGv slot=" IEMTLB_SLOT_FMT " (global)\n",
+                                    idx, pCur->rip, pCur->bParam ? "data" : "code",
+                                    pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param));
+                    break;
+                case kIemTlbTraceType_Load_Cr0:
+                    pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr0 %08RX64 (was %08RX64)\n",
+                                    idx, pCur->rip, pCur->u64Param, pCur->u64Param2);
+                    break;
+                case kIemTlbTraceType_Load_Cr3:
+                    pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr3 %016RX64 (was %016RX64)\n",
+                                    idx, pCur->rip, pCur->u64Param, pCur->u64Param2);
+                    break;
+                case kIemTlbTraceType_Load_Cr4:
+                    pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr4 %08RX64 (was %08RX64)\n",
+                                    idx, pCur->rip, pCur->u64Param, pCur->u64Param2);
+                    break;
+                case kIemTlbTraceType_Load_Efer:
+                    pHlp->pfnPrintf(pHlp, "%u: %016RX64 load efer %016RX64 (was %016RX64)\n",
+                                    idx, pCur->rip, pCur->u64Param, pCur->u64Param2);
+                    break;
+                case kIemTlbTraceType_Invalid:
+                    pHlp->pfnPrintf(pHlp, "%u: Invalid!\n");
+                    break;
+            }
+        }
+    }
+    else
+        pHlp->pfnPrintf(pHlp, "No trace entries to display\n");
+}
+#endif /* IEM_WITH_TLB_TRACE */
+
 #if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
 /**
trunk/src/VBox/VMM/include/IEMInline.h
r105291 → r105440

@@ -4798,3 +4798,24 @@
 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
 
+#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3)
+/**
+ * Adds an entry to the TLB trace buffer.
+ *
+ * @note Don't use directly, only via the IEMTLBTRACE_XXX macros.
+ */
+DECLINLINE(void) iemTlbTrace(PVMCPU pVCpu, IEMTLBTRACETYPE enmType, uint64_t u64Param, uint64_t u64Param2 = 0,
+                             uint8_t bParam = 0 /*, uint32_t u32Param = 0, uint16_t u16Param = 0 */)
+{
+    uint32_t const          fMask  = RT_BIT_32(pVCpu->iem.s.cTlbTraceEntriesShift) - 1;
+    PIEMTLBTRACEENTRY const pEntry = &pVCpu->iem.s.paTlbTraceEntries[pVCpu->iem.s.idxTlbTraceEntry++ & fMask];
+    pEntry->u64Param  = u64Param;
+    pEntry->u64Param2 = u64Param2;
+    pEntry->u16Param  = 0; //u16Param;
+    pEntry->u32Param  = 0; //u32Param;
+    pEntry->bParam    = bParam;
+    pEntry->enmType   = enmType;
+    pEntry->rip       = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
+}
+#endif
+
 #endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */
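iemTlbTrace() treats paTlbTraceEntries as a power-of-two ring buffer: idxTlbTraceEntry increases without bound and is masked on every write, so once the buffer fills up the oldest entries are silently overwritten. A minimal sketch of the same pattern (illustrative variable names, not changeset code):

    uint32_t const fMask = RT_BIT_32(cShift) - 1;               /* cShift = 19 -> 524288 entries */
    paEntries[idxNext++ & fMask] = Entry;                       /* write position wraps automatically */
    uint32_t const cLive = RT_MIN(idxNext, RT_BIT_32(cShift));  /* entries holding valid data */

The reader side, iemR3InfoTlbTrace() in IEMR3.cpp above, uses the same mask and walks backwards from idxTlbTraceEntry, which is why it caps the display at RT_MIN(idx, RT_BIT_32(cShift)) entries. Once registered, the trace can be dumped from the VM debugger console like any other DBGF info item, for example (hypothetical invocation, option names from the handler's help text):

    info tlbtrace --last 100
    info tlbtrace --cpu 1 --stop-at-global-flush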
trunk/src/VBox/VMM/include/IEMInternal-armv8.h
r105411 → r105440

@@ -301,4 +301,23 @@
 
 
+/** The TLB size (power of two).
+ * We initially chose 256 because that way we can obtain the result directly
+ * from a 8-bit register without an additional AND instruction.
+ * See also @bugref{10687}. */
+#define IEMTLB_ENTRY_COUNT                      256
+#define IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO      8
+
+/** TLB slot format spec (assumes uint32_t or unsigned value). */
+#if IEMTLB_ENTRY_COUNT <= 0x100 / 2
+# define IEMTLB_SLOT_FMT    "%02x"
+#elif IEMTLB_ENTRY_COUNT <= 0x1000 / 2
+# define IEMTLB_SLOT_FMT    "%03x"
+#elif IEMTLB_ENTRY_COUNT <= 0x10000 / 2
+# define IEMTLB_SLOT_FMT    "%04x"
+#else
+# define IEMTLB_SLOT_FMT    "%05x"
+#endif
+
+
 /**
  * An IEM TLB.
@@ -311,5 +330,5 @@
  * We've choosen 256 because that way we can obtain the result directly from a
  * 8-bit register without an additional AND instruction. */
-    IEMTLBENTRY aEntries[256];
+    IEMTLBENTRY aEntries[IEMTLB_ENTRY_COUNT];
     /** The TLB revision.
      * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
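With the entry count left at 256 on ARMv8, the new conditional mechanically resolves IEMTLB_SLOT_FMT to "%03x" (256 is above 0x100/2 but no larger than 0x1000/2); the macro only controls how wide slot numbers are printed in the debug info output.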
trunk/src/VBox/VMM/include/IEMInternal.h
r105411 → r105440

@@ -527,4 +527,5 @@
                                  | IEMTLBE_F_PHYS_REV )
 
+
 /** The TLB size (power of two).
  * We initially chose 256 because that way we can obtain the result directly
@@ -535,8 +536,20 @@
 # define IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO     8
 #else
-# define IEMTLB_ENTRY_COUNT                     8192
-# define IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO     13
+# define IEMTLB_ENTRY_COUNT                     16384
+# define IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO     14
 #endif
 AssertCompile(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) == IEMTLB_ENTRY_COUNT);
+
+/** TLB slot format spec (assumes uint32_t or unsigned value). */
+#if IEMTLB_ENTRY_COUNT <= 0x100 / 2
+# define IEMTLB_SLOT_FMT    "%02x"
+#elif IEMTLB_ENTRY_COUNT <= 0x1000 / 2
+# define IEMTLB_SLOT_FMT    "%03x"
+#elif IEMTLB_ENTRY_COUNT <= 0x10000 / 2
+# define IEMTLB_SLOT_FMT    "%04x"
+#else
+# define IEMTLB_SLOT_FMT    "%05x"
+#endif
+
 
 /**
@@ -700,4 +713,81 @@
  */
 #define IEMTLB_TAG_TO_EVEN_ENTRY(a_pTlb, a_uTag)    ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_EVEN_INDEX(a_uTag)] )
+
+/** Converts a GC address to an even TLB index. */
+#define IEMTLB_ADDR_TO_EVEN_INDEX(a_GCPtr)          IEMTLB_TAG_TO_EVEN_INDEX(IEMTLB_CALC_TAG_NO_REV(a_GCPtr))
+
+
+/** @def IEM_WITH_TLB_TRACE
+ * Enables the TLB tracing.
+ * Adjust buffer size in IEMR3Init. */
+#if defined(DOXYGEN_RUNNING) || 0
+# define IEM_WITH_TLB_TRACE
+#endif
+
+#ifdef IEM_WITH_TLB_TRACE
+
+/** TLB trace entry types. */
+typedef enum : uint8_t
+{
+    kIemTlbTraceType_Invalid,
+    kIemTlbTraceType_InvlPg,
+    kIemTlbTraceType_Flush,
+    kIemTlbTraceType_FlushGlobal,
+    kIemTlbTraceType_Load,
+    kIemTlbTraceType_LoadGlobal,
+    kIemTlbTraceType_Load_Cr0,
+    kIemTlbTraceType_Load_Cr3,
+    kIemTlbTraceType_Load_Cr4,
+    kIemTlbTraceType_Load_Efer
+} IEMTLBTRACETYPE;
+
+/** TLB trace entry. */
+typedef struct IEMTLBTRACEENTRY
+{
+    /** The flattened RIP for the event. */
+    uint64_t            rip;
+    /** The event type. */
+    IEMTLBTRACETYPE     enmType;
+    /** Byte parameter - typically used as 'bool fDataTlb'. */
+    uint8_t             bParam;
+    /** 16-bit parameter value. */
+    uint16_t            u16Param;
+    /** 32-bit parameter value. */
+    uint32_t            u32Param;
+    /** 64-bit parameter value. */
+    uint64_t            u64Param;
+    /** 64-bit parameter value. */
+    uint64_t            u64Param2;
+} IEMTLBTRACEENTRY;
+AssertCompileSize(IEMTLBTRACEENTRY, 32);
+/** Pointer to a TLB trace entry. */
+typedef IEMTLBTRACEENTRY *PIEMTLBTRACEENTRY;
+/** Pointer to a const TLB trace entry. */
+typedef IEMTLBTRACEENTRY const *PCIEMTLBTRACEENTRY;
+#endif /* !IEM_WITH_TLB_TRACE */
+
+#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3)
+# define IEMTLBTRACE_INVLPG(a_pVCpu, a_GCPtr)                   iemTlbTrace(a_pVCpu, kIemTlbTraceType_InvlPg, a_GCPtr)
+# define IEMTLBTRACE_FLUSH(a_pVCpu, a_uRev, a_fDataTlb)         iemTlbTrace(a_pVCpu, kIemTlbTraceType_Flush, a_uRev, 0, a_fDataTlb)
+# define IEMTLBTRACE_FLUSH_GLOBAL(a_pVCpu, a_uRev, a_uGRev, a_fDataTlb) \
+    iemTlbTrace(a_pVCpu, kIemTlbTraceType_FlushGlobal, a_uRev, a_uGRev, a_fDataTlb)
+# define IEMTLBTRACE_LOAD(a_pVCpu, a_GCPtr, a_fDataTlb)         iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load, a_GCPtr, 0, a_fDataTlb)
+# define IEMTLBTRACE_LOAD_GLOBAL(a_pVCpu, a_GCPtr, a_fDataTlb) \
+    iemTlbTrace(a_pVCpu, kIemTlbTraceType_LoadGlobal, a_GCPtr, 0, a_fDataTlb)
+# define IEMTLBTRACE_LOAD_CR0(a_pVCpu, a_uNew, a_uOld)          iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Cr0, a_uNew, a_uOld)
+# define IEMTLBTRACE_LOAD_CR3(a_pVCpu, a_uNew, a_uOld)          iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Cr3, a_uNew, a_uOld)
+# define IEMTLBTRACE_LOAD_CR4(a_pVCpu, a_uNew, a_uOld)          iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Cr4, a_uNew, a_uOld)
+# define IEMTLBTRACE_LOAD_EFER(a_pVCpu, a_uNew, a_uOld)         iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Efer, a_uNew, a_uOld)
+#else
+# define IEMTLBTRACE_INVLPG(a_pVCpu, a_GCPtr)                           do { } while (0)
+# define IEMTLBTRACE_FLUSH(a_pVCpu, a_uRev, a_fDataTlb)                 do { } while (0)
+# define IEMTLBTRACE_FLUSH_GLOBAL(a_pVCpu, a_uRev, a_uGRev, a_fDataTlb) do { } while (0)
+# define IEMTLBTRACE_LOAD(a_pVCpu, a_GCPtr, a_fDataTlb)                 do { } while (0)
+# define IEMTLBTRACE_LOAD_GLOBAL(a_pVCpu, a_GCPtr, a_fDataTlb)          do { } while (0)
+# define IEMTLBTRACE_LOAD_CR0(a_pVCpu, a_uNew, a_uOld)                  do { } while (0)
+# define IEMTLBTRACE_LOAD_CR3(a_pVCpu, a_uNew, a_uOld)                  do { } while (0)
+# define IEMTLBTRACE_LOAD_CR4(a_pVCpu, a_uNew, a_uOld)                  do { } while (0)
+# define IEMTLBTRACE_LOAD_EFER(a_pVCpu, a_uNew, a_uOld)                 do { } while (0)
+#endif
 
 
@@ -2236,6 +2326,19 @@
     /** @} */
 
+#ifdef IEM_WITH_TLB_TRACE
+    uint64_t            au64Padding[3];
+#else
     uint64_t            au64Padding[5];
+#endif
     /** @} */
+
+#ifdef IEM_WITH_TLB_TRACE
+    /** The end (next) trace entry. */
+    uint32_t            idxTlbTraceEntry;
+    /** Number of trace entries allocated expressed as a power of two. */
+    uint32_t            cTlbTraceEntriesShift;
+    /** The trace entries. */
+    PIEMTLBTRACEENTRY   paTlbTraceEntries;
+#endif
 
     /** Data TLB.
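For sizing purposes: the AssertCompileSize above pins IEMTLBTRACEENTRY at 8 + 1 + 1 + 2 + 4 + 8 + 8 = 32 bytes, so with the cTlbTraceEntriesShift of 19 set in IEMR3Init the trace buffer comes to 2^19 * 32 bytes = 16 MiB per vCPU, allocated with RTMemPageAlloc only when IEM_WITH_TLB_TRACE is defined. The x86 TLBs themselves also grow here from 8192 to 16384 entries, and IEMTLB_SLOT_FMT picks the matching hex width for slot numbers in the debug output ("%04x" for anything up to 0x10000/2 entries).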