VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp @ 109008

Last change on this file since 109008 was 108791, checked in by vboxsync, 3 weeks ago

VMM/IEM: More ARM target work. jiraref:VBP-1598

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 150.3 KB
1/* $Id: IEMR3.cpp 108791 2025-03-28 21:58:31Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/dbgf.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/ssm.h>
39#include "IEMInternal.h"
40#include <VBox/vmm/vm.h>
41#include <VBox/vmm/vmapi.h>
42#include <VBox/err.h>
43#ifdef VBOX_WITH_DEBUGGER
44# include <VBox/dbg.h>
45#endif
46
47#include <iprt/assert.h>
48#include <iprt/getopt.h>
49#ifdef IEM_WITH_TLB_TRACE
50# include <iprt/mem.h>
51#endif
52#include <iprt/string.h>
53
54#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
55# include "IEMN8veRecompiler.h"
56# include "IEMThreadedFunctions.h"
57# include "IEMInline.h"
58# ifdef VBOX_VMM_TARGET_X86
59# include "VMMAll/target-x86/IEMInline-x86.h"
60# endif
61#endif
62
63
64/*********************************************************************************************************************************
65* Internal Functions *
66*********************************************************************************************************************************/
67static FNDBGFINFOARGVINT iemR3InfoITlb;
68static FNDBGFINFOARGVINT iemR3InfoDTlb;
69#ifdef IEM_WITH_TLB_TRACE
70static FNDBGFINFOARGVINT iemR3InfoTlbTrace;
71#endif
72#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
73static FNDBGFINFOARGVINT iemR3InfoTb;
74static FNDBGFINFOARGVINT iemR3InfoTbTop;
75#endif
76#ifdef VBOX_WITH_DEBUGGER
77static void iemR3RegisterDebuggerCommands(void);
78#endif
79
80
81#if !defined(VBOX_VMM_TARGET_ARMV8)
82static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
83{
84 switch (enmTargetCpu)
85 {
86#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
87 CASE_RET_STR(IEMTARGETCPU_8086);
88 CASE_RET_STR(IEMTARGETCPU_V20);
89 CASE_RET_STR(IEMTARGETCPU_186);
90 CASE_RET_STR(IEMTARGETCPU_286);
91 CASE_RET_STR(IEMTARGETCPU_386);
92 CASE_RET_STR(IEMTARGETCPU_486);
93 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
94 CASE_RET_STR(IEMTARGETCPU_PPRO);
95 CASE_RET_STR(IEMTARGETCPU_CURRENT);
96#undef CASE_RET_STR
97 default: return "Unknown";
98 }
99}
100#endif
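/*
 * Note on CASE_RET_STR above: #enmValue stringifies the enumerator name, and
 * adding (sizeof("IEMTARGETCPU_") - 1) to the resulting string literal skips
 * the common 13-character "IEMTARGETCPU_" prefix.  A minimal sketch of the
 * same idiom:
 *
 *     const char *psz = "IEMTARGETCPU_PENTIUM" + (sizeof("IEMTARGETCPU_") - 1);
 *     // psz now points at "PENTIUM".
 *
 * Thus iemGetTargetCpuName(IEMTARGETCPU_PENTIUM) returns "PENTIUM".
 */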
101
102
103#if defined(RT_ARCH_ARM64) && defined(_MSC_VER)
104# pragma warning(disable:4883) /* profile build: IEMR3.cpp(114) : warning C4883: 'IEMR3Init': function size suppresses optimizations*/
105#endif
106
107/**
108 * Initializes the interpreted execution manager.
109 *
110 * This must be called after CPUM as we're querying information from CPUM about
111 * the guest and host CPUs.
112 *
113 * @returns VBox status code.
114 * @param pVM The cross context VM structure.
115 */
116VMMR3_INT_DECL(int) IEMR3Init(PVM pVM)
117{
118 /*
119 * Read configuration.
120 */
121#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
122 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
123 int rc;
124#endif
125
126#if defined(VBOX_VMM_TARGET_X86) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
127 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
128 * Controls whether the custom VBox specific CPUID host call interface is
129 * enabled or not. */
130# ifdef DEBUG_bird
131 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
132# else
133 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
134# endif
135 AssertLogRelRCReturn(rc, rc);
136#endif
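/*
 * Note: like other CFGM keys, the /IEM/ values read in this function can
 * usually be supplied from the host side via the VBoxInternal/ extradata
 * prefix (which gets mapped onto the CFGM tree), without rebuilding.  Sketch,
 * with a placeholder VM name:
 *
 *     VBoxManage setextradata "MyVm" "VBoxInternal/IEM/CpuIdHostCall" 1
 *
 * When a key is absent, the CFGMR3Query*Def() calls below simply fall back to
 * the hardcoded defaults.
 */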
137
138#ifdef VBOX_WITH_IEM_RECOMPILER
139 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
140 * Max number of TBs per EMT. */
141 uint32_t cMaxTbs = 0;
142 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
143 AssertLogRelRCReturn(rc, rc);
144 if (cMaxTbs < _16K || cMaxTbs > _8M)
145 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
146 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
147
148 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32768}
149 * Initial (minimum) number of TBs per EMT in ring-3. */
150 uint32_t cInitialTbs = 0;
151 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
152 AssertLogRelRCReturn(rc, rc);
153 if (cInitialTbs < _16K || cInitialTbs > _8M)
154 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
155 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
156
157 /* Check that the two values make sense together. Expect user/api to do
158 the right thing or get lost. */
159 if (cInitialTbs > cMaxTbs)
160 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
161 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
162 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
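/*
 * Worked example for the two TB count keys: with the defaults, MaxTbCount is
 * 524288 (_512K) and InitialTbCount is min(MaxTbCount, 32768) = 32768 per EMT.
 * Both must lie in [16384, 8M] and InitialTbCount must not exceed MaxTbCount,
 * otherwise VM construction fails with VERR_OUT_OF_RANGE, e.g. (hypothetical
 * override):
 *
 *     InitialTbCount=65536, MaxTbCount=32768  ->  "InitialTbCount value 65536
 *     (0x10000) is higher than the MaxTbCount value 32768 (0x8000)"
 */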
163
164 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
165 * Max executable memory for recompiled code per EMT. */
166 uint64_t cbMaxExec = 0;
167 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
168 AssertLogRelRCReturn(rc, rc);
169 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
171 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
172 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
173
174 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
175 * The executable memory allocator chunk size. */
176 uint32_t cbChunkExec = 0;
177 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
178 AssertLogRelRCReturn(rc, rc);
179 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
180 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
181 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
182 cbChunkExec, cbChunkExec, _1M, _256M);
183
184 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
185 * The initial executable memory allocator size (per EMT). The value is
186 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
187 uint64_t cbInitialExec = 0;
188 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
189 AssertLogRelRCReturn(rc, rc);
190 if (cbInitialExec > cbMaxExec)
191 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
192 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
193 cbInitialExec, cbInitialExec, cbMaxExec);
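/*
 * Note on the executable memory sizing: MaxExecMem caps the recompiled-code
 * arena per EMT at 512 MiB by default, ExecChunkSize (0 = auto) picks the
 * allocation granularity, and InitialExecMemSize is rounded up to whole
 * chunks.  E.g. with a (hypothetical) auto-selected 2 MiB chunk size, an
 * InitialExecMemSize of 1 byte pre-allocates one 2 MiB chunk per EMT.
 */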
194
195 /** @cfgm{/IEM/NativeRecompileAtUsedCount, uint32_t, 16}
196 * The translation block use count value to do native recompilation at.
197 * Set to zero to disable native recompilation. */
198 uint32_t uTbNativeRecompileAtUsedCount = 16;
199 rc = CFGMR3QueryU32Def(pIem, "NativeRecompileAtUsedCount", &uTbNativeRecompileAtUsedCount, 16);
200 AssertLogRelRCReturn(rc, rc);
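/*
 * In other words: a threaded TB is handed to the native recompiler once its
 * use counter reaches NativeRecompileAtUsedCount (16 by default); setting the
 * key to 0 disables native recompilation so TBs stay threaded.  A
 * (hypothetical) host-side override to disable it:
 *
 *     VBoxManage setextradata "MyVm" "VBoxInternal/IEM/NativeRecompileAtUsedCount" 0
 */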
201
202 /** @cfgm{/IEM/HostICacheInvalidationViaHostAPI, bool, false}
203 * Whether to use any available host OS API for flushing the instruction cache
204 * after completing a translation block. */
205 bool fFlag = false;
206 rc = CFGMR3QueryBoolDef(pIem, "HostICacheInvalidationViaHostAPI", &fFlag, false);
207 AssertLogRelRCReturn(rc, rc);
208 uint8_t fHostICacheInvalidation = fFlag ? IEMNATIVE_ICACHE_F_USE_HOST_API : 0;
209
210 /** @cfgm{/IEM/HostICacheInvalidationEndWithIsb, bool, false}
211 * Whether to include an ISB in the instruction cache invalidation sequence
212 * after completing a translation block. */
213 fFlag = false;
214 rc = CFGMR3QueryBoolDef(pIem, "HostICacheInvalidationEndWithIsb", &fFlag, false);
215 AssertLogRelRCReturn(rc, rc);
216 if (fFlag)
217 fHostICacheInvalidation |= IEMNATIVE_ICACHE_F_END_WITH_ISH;
218
219#endif /* VBOX_WITH_IEM_RECOMPILER*/
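/*
 * Note: the two icache booleans above are folded into a single flag byte
 * (IEMNATIVE_ICACHE_F_USE_HOST_API and/or IEMNATIVE_ICACHE_F_END_WITH_ISH)
 * that is handed to each vCPU as fHostICacheInvalidation further down.  These
 * knobs mainly matter for the native recompiler on hosts (typically ARM)
 * where the instruction cache is not automatically coherent with freshly
 * written code.
 */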
220
221 /*
222 * Initialize per-CPU data and register statistics.
223 */
224#if 1
225 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
226 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
227#else
228 uint64_t const uInitialTlbRevision = UINT64_C(0) + (IEMTLB_REVISION_INCR * 4U);
229 uint64_t const uInitialTlbPhysRev = UINT64_C(0) + (IEMTLB_PHYS_REV_INCR * 4U);
230#endif
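/*
 * The initial TLB revision / physical revision values start a couple hundred
 * increments below zero (i.e. they wrap around near UINT64_MAX), presumably
 * so the revision rollover paths get exercised early rather than only after a
 * very long uptime.  Illustration with a hypothetical increment of 0x1000:
 *
 *     UINT64_C(0) - 0x1000 * 200  ==  UINT64_C(0xfffffffffff38000)
 *
 * The real IEMTLB_REVISION_INCR / IEMTLB_PHYS_REV_INCR values live in the IEM
 * internal headers.
 */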
231
232 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
233 {
234 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
235 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
236
237 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
238#ifndef VBOX_VMM_TARGET_ARMV8
239 pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal = pVCpu->iem.s.DataTlb.uTlbRevisionGlobal = uInitialTlbRevision;
240#endif
241 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
242#ifndef VBOX_VMM_TARGET_ARMV8
243 pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
244 pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
245 pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
246 pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
247#endif
248
249#ifndef VBOX_VMM_TARGET_ARMV8
250 pVCpu->iem.s.cTbsTillNextTimerPoll = 128;
251 pVCpu->iem.s.cTbsTillNextTimerPollPrev = 128;
252#endif
253
254 /*
255 * Host and guest CPU information.
256 */
257 if (idCpu == 0)
258 {
259 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
260#if !defined(VBOX_VMM_TARGET_ARMV8)
261 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
262 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
263 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
264# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
265 if (pVCpu->iem.s.enmCpuVendor == CPUMGetHostCpuVendor(pVM))
266 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
267 else
268# endif
269 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
270#else
271 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
272 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
273#endif
274
275#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
276 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
277 {
278 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
279 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
280 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
281 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
282 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
283 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
284 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
285 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
286 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
287 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
288 }
289 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
290 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
291 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
292#else
293 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
294 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
295 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
296#endif
297 }
298 else
299 {
300 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
301 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
302 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
303#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
304 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
305#endif
306 }
307
308 /*
309 * Mark all buffers free.
310 */
311 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
312 while (iMemMap-- > 0)
313 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
314
315#ifdef VBOX_WITH_IEM_RECOMPILER
316 /*
317 * Recompiler state and configuration distribution.
318 */
319 pVCpu->iem.s.uRegFpCtrl = IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED;
320 pVCpu->iem.s.uTbNativeRecompileAtUsedCount = uTbNativeRecompileAtUsedCount;
321 pVCpu->iem.s.fHostICacheInvalidation = fHostICacheInvalidation;
322#endif
323
324#ifdef IEM_WITH_TLB_TRACE
325 /*
326 * Allocate trace buffer.
327 */
328 pVCpu->iem.s.idxTlbTraceEntry = 0;
329 pVCpu->iem.s.cTlbTraceEntriesShift = 16;
330 pVCpu->iem.s.paTlbTraceEntries = (PIEMTLBTRACEENTRY)RTMemPageAlloc( RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift)
331 * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
332 AssertLogRelReturn(pVCpu->iem.s.paTlbTraceEntries, VERR_NO_PAGE_MEMORY);
333#endif
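/*
 * Sketch of the trace buffer size: RT_BIT_Z(16) gives 65536 entries, so each
 * vCPU gets 64K * sizeof(IEMTLBTRACEENTRY) bytes of page-aligned memory from
 * RTMemPageAlloc (e.g. 2 MiB per vCPU if the entry were, hypothetically, 32
 * bytes).  The power-of-two, shift-based sizing suggests idxTlbTraceEntry is
 * masked to form a ring of the most recent TLB trace events.
 */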
334 }
335
336
337#ifdef VBOX_WITH_IEM_RECOMPILER
338 /*
339 * Initialize the TB allocator and cache (/ hash table).
340 *
341 * This is done by each EMT to try to get more optimal thread/NUMA locality of
342 * the allocations.
343 */
344 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
345 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
346 AssertLogRelRCReturn(rc, rc);
347#endif
348
349 /*
350 * Register statistics.
351 */
352 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
353 {
354#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
355 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
356 char szPat[128];
357 RT_NOREF_PV(szPat); /* lazy bird */
358 char szVal[128];
359 RT_NOREF_PV(szVal); /* lazy bird */
360
361 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
362 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
363 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
364 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
365 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
366 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
367 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
368 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
369 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
370 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
371 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
372 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
373 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
374 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
375 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
376 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
377 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
378 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
379
380 /* Code TLB: */
381 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
382 "Code TLB non-global revision", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobal", idCpu);
383 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
384 "Code TLB global revision", "/IEM/CPU%u/Tlb/Code/RevisionGlobal", idCpu);
385 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
386 "Code TLB non-global flushes", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobalFlushes", idCpu);
387 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
388 "Code TLB global flushes", "/IEM/CPU%u/Tlb/Code/RevisionGlobalFlushes", idCpu);
389 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
390 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/RevisionRollovers", idCpu);
391
392 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
393 "Code TLB physical revision", "/IEM/CPU%u/Tlb/Code/PhysicalRevision", idCpu);
394 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
395 "Code TLB revision flushes", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionFlushes", idCpu);
396 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
397 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionRollovers", idCpu);
398
399 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
400 "Code TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageGlobalCurLoads", idCpu);
401 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
402 "Code TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalFirstTag", idCpu);
403 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
404 "Code TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalLastTag", idCpu);
405
406 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
407 "Code TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalCurLoads", idCpu);
408 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
409 "Code TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalFirstTag", idCpu);
410 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
411 "Code TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalLastTag", idCpu);
412
413 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
414 "Code TLB page invalidation requests", "/IEM/CPU%u/Tlb/Code/InvlPg", idCpu);
415 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
416 "Code TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeGlobal", idCpu);
417 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
418 "Code TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeNonGlobal", idCpu);
419
420 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
421 "Code TLB misses", "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
422 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
423 "Code TLB global loads", "/IEM/CPU%u/Tlb/Code/Misses/GlobalLoads", idCpu);
424 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
425 "Code TLB slow read path", "/IEM/CPU%u/Tlb/Code/SlowReads", idCpu);
426# ifdef IEM_WITH_TLB_STATISTICS
427 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
428 "Code TLB hits (non-native)", "/IEM/CPU%u/Tlb/Code/Hits/Other", idCpu);
429# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
430 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
431 "Code TLB native hits on new page", "/IEM/CPU%u/Tlb/Code/Hits/New-Page", idCpu);
432 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
433 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/Tlb/Code/Hits/New-Page-With-Offset", idCpu);
434# endif
435
436 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits/*", idCpu);
437 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB hits",
438 "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
439
440 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Hits|/IEM/CPU%u/Tlb/Code/Misses", idCpu, idCpu);
441 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB lookups (sum of hits and misses)",
442 "/IEM/CPU%u/Tlb/Code/AllLookups", idCpu);
443
444 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
445 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
446 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
447 "Code TLB actual miss rate", "/IEM/CPU%u/Tlb/Code/RateMisses", idCpu);
448
449# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
450 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
451 "Code TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
452 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Tag", idCpu);
453 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
454 "Code TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
455 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
456 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
457 "Code TLB misses in native code: Alignment [not directly included grand parent sum]",
458 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Alignment", idCpu);
459 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
460 "Code TLB misses in native code: Cross page [not directly included grand parent sum]",
461 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/CrossPage", idCpu);
462 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
463 "Code TLB misses in native code: Non-canonical [not directly included grand parent sum]",
464 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/NonCanonical", idCpu);
465
466 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
467 "Code TLB native misses on new page",
468 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page", idCpu);
469 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
470 "Code TLB native misses on new page w/ offset",
471 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page-With-Offset", idCpu);
472# endif
473# endif /* IEM_WITH_TLB_STATISTICS */
474
475 /* Data TLB organized as best we can... */
476 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
477 "Data TLB non-global revision", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobal", idCpu);
478 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
479 "Data TLB global revision", "/IEM/CPU%u/Tlb/Data/RevisionGlobal", idCpu);
480 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
481 "Data TLB non-global flushes", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobalFlushes", idCpu);
482 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
483 "Data TLB global flushes", "/IEM/CPU%u/Tlb/Data/RevisionGlobalFlushes", idCpu);
484 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
485 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/RevisionRollovers", idCpu);
486
487 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
488 "Data TLB physical revision", "/IEM/CPU%u/Tlb/Data/PhysicalRevision", idCpu);
489 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
490 "Data TLB revision flushes", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionFlushes", idCpu);
491 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
492 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionRollovers", idCpu);
493
494 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
495 "Data TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageGlobalCurLoads", idCpu);
496 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
497 "Data TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalFirstTag", idCpu);
498 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
499 "Data TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalLastTag", idCpu);
500
501 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
502 "Data TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalCurLoads", idCpu);
503 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
504 "Data TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalFirstTag", idCpu);
505 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
506 "Data TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalLastTag", idCpu);
507
508 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
509 "Data TLB page invalidation requests", "/IEM/CPU%u/Tlb/Data/InvlPg", idCpu);
510 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
511 "Data TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeGlobal", idCpu);
512 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
513 "Data TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeNonGlobal", idCpu);
514
515 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
516 "Data TLB core misses (iemMemMap, direct iemMemMapJmp (not safe path))",
517 "/IEM/CPU%u/Tlb/Data/Misses/Core", idCpu);
518 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
519 "Data TLB global loads",
520 "/IEM/CPU%u/Tlb/Data/Misses/Core/GlobalLoads", idCpu);
521 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
522 "Data TLB safe read path (inline/native misses going to iemMemMapJmp)",
523 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Reads", idCpu);
524 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
525 "Data TLB safe write path (inline/native misses going to iemMemMapJmp)",
526 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Writes", idCpu);
527 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/*", idCpu);
528 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB misses",
529 "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
530
531 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/Safe/*", idCpu);
532 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB actual safe path calls (read + write)",
533 "/IEM/CPU%u/Tlb/Data/Misses/Safe", idCpu);
534 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
535 "Data TLB hits in iemMemMapJmp - not part of safe-path total",
536 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartHits", idCpu);
537 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
538 "Data TLB misses in iemMemMapJmp - not part of safe-path total",
539 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses", idCpu);
540 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
541 "Data TLB global loads",
542 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses/GlobalLoads", idCpu);
543
544# ifdef IEM_WITH_TLB_STATISTICS
545# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
546 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
547 "Data TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
548 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Tag", idCpu);
549 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
550 "Data TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
551 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
552 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
553 "Data TLB misses in native code: Alignment [not directly included grand parent sum]",
554 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Alignment", idCpu);
555 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
556 "Data TLB misses in native code: Cross page [not directly included grand parent sum]",
557 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/CrossPage", idCpu);
558 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
559 "Data TLB misses in native code: Non-canonical [not directly included grand parent sum]",
560 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/NonCanonical", idCpu);
561# endif
562# endif
563
564# ifdef IEM_WITH_TLB_STATISTICS
565 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
566 "Data TLB core hits (iemMemMap, direct iemMemMapJmp (not safe path))",
567 "/IEM/CPU%u/Tlb/Data/Hits/Core", idCpu);
568 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInlineCodeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
569 "Data TLB hits in IEMAllMemRWTmplInline.cpp.h",
570 "/IEM/CPU%u/Tlb/Data/Hits/Inline", idCpu);
571# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
572 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
573 "Data TLB native stack access hits",
574 "/IEM/CPU%u/Tlb/Data/Hits/Native/Stack", idCpu);
575 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
576 "Data TLB native data fetch hits",
577 "/IEM/CPU%u/Tlb/Data/Hits/Native/Fetch", idCpu);
578 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
579 "Data TLB native data store hits",
580 "/IEM/CPU%u/Tlb/Data/Hits/Native/Store", idCpu);
581 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
582 "Data TLB native mapped data hits",
583 "/IEM/CPU%u/Tlb/Data/Hits/Native/Mapped", idCpu);
584# endif
585 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/*", idCpu);
586 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits",
587 "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
588
589# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
590 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/Native/*", idCpu);
591 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits from native code",
592 "/IEM/CPU%u/Tlb/Data/Hits/Native", idCpu);
593# endif
594
595 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Hits|/IEM/CPU%u/Tlb/Data/Misses", idCpu, idCpu);
596 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB lookups (sum of hits and misses)",
597 "/IEM/CPU%u/Tlb/Data/AllLookups", idCpu);
598
599 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
600 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
601 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
602 "Data TLB actual miss rate", "/IEM/CPU%u/Tlb/Data/RateMisses", idCpu);
603
604# endif /* IEM_WITH_TLB_STATISTICS */
605
606
607#ifdef VBOX_WITH_IEM_RECOMPILER
608 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
609 "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
610 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
611 "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
612 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
613 "Times threaded TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecThreadedBreaks", idCpu);
614# ifdef VBOX_WITH_STATISTICS
615 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
616 "Times threaded TB execution was interrupted/broken off on a call with lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithLookup", idCpu);
617 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithoutLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
618 "Times threaded TB execution was interrupted/broken off on a call without lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithoutLookup", idCpu);
619# endif
620
621# ifdef VBOX_WITH_STATISTICS
622 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPoll, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
623 "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll", idCpu);
624 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollRun, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
625 "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll/Running", idCpu);
626 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollUnchanged, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
627 "Timer polling interval unchanged", "/IEM/CPU%u/re/TimerPoll/Unchanged", idCpu);
628 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollTiny, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
629 "Timer polling interval tiny", "/IEM/CPU%u/re/TimerPoll/Tiny", idCpu);
630 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollDefaultCalc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
631 "Timer polling interval calculated using defaults", "/IEM/CPU%u/re/TimerPoll/DefaultCalc", idCpu);
632 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollMax, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
633 "Timer polling interval maxed out", "/IEM/CPU%u/re/TimerPoll/Max", idCpu);
634 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorDivision, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE,
635 "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorDivision", idCpu);
636 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorMultiplication, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
637 "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorMultiplication", idCpu);
638# endif
639 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbsTillNextTimerPollPrev, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
640 "Timer polling interval (in TBs)", "/IEM/CPU%u/re/TimerPollInterval", idCpu);
641
642 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
643 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
644 "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
645 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
646 "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
647# ifdef VBOX_WITH_STATISTICS
648 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
649 "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
650# endif
651 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
652 "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/ExecMem/TbPruningNative", idCpu);
653 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
654 "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
655 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
656 "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
657 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
658 "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
659 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
660 "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
661 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
662 "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
663 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
664 "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
665 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
666 "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);
667
668 PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
669 STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
670 "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);
671
672 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
673 "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
674 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHitsViaTbLookupTable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
675 "Translation block lookup hits via TB lookup table associated with the previous TB", "/IEM/CPU%u/re/cTbLookupHitsViaTbLookupTable", idCpu);
676 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
677 "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
678 STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
679 "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
680# ifdef VBOX_WITH_STATISTICS
681 STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
682 "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
683# endif
684
685 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
686 "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
687 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
688 "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
689 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLookupEntries, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
690 "TB lookup table entries per threaded translation block", "/IEM/CPU%u/re/ThrdLookupEntriesPerTb", idCpu);
691
692 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
693 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
694 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckTimersBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
695 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckTimersBreaks", idCpu);
696 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
697 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
698 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
699 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
700 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
701 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
702
703 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
704 "Detected loop full TB", "/IEM/CPU%u/re/LoopFullTbDetected", idCpu);
705 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected2, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
706 "Detected loop full TB but looping back to before the first TB instruction",
707 "/IEM/CPU%u/re/LoopFullTbDetected2", idCpu);
708 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopInTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
709 "Detected loop within TB", "/IEM/CPU%u/re/LoopInTbDetected", idCpu);
710
711 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeExecMemInstrBufAllocFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
712 "Number of times the exec memory allocator failed to allocate a large enough buffer",
713 "/IEM/CPU%u/re/NativeExecMemInstrBufAllocFailed", idCpu);
714
715 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
716 "Number of threaded calls per TB that have been properly recompiled to native code",
717 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
718 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
719 "Number of threaded calls per TB that could not be recompiler to native code",
720 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
721 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
722 "Number of threaded calls that could not be recompiler to native code",
723 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
724
725 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
726 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
727 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
728 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
729
730# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
731# ifdef VBOX_WITH_STATISTICS
732 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
733 "Number of calls to iemNativeRegAllocFindFree.",
734 "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
735# endif
736 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
737 "Number of times iemNativeRegAllocFindFree needed to free a variable.",
738 "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
739# ifdef VBOX_WITH_STATISTICS
740 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
741 "Number of times iemNativeRegAllocFindFree did not needed to free any variables.",
742 "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
743 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
744 "Times liveness info freeed up shadowed guest registers in iemNativeRegAllocFindFree.",
745 "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
746 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
747 "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
748 "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
749
750# define REG_NATIVE_EFL_GROUP(a_Lower, a_Camel) do { \
751 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponed ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
752 "Postponed all status flag updating, " #a_Lower " instructions", \
753 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
754 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkipped ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
755 "Skipped all status flag updating, " #a_Lower " instructions", \
756 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
757 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflTotal ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
758 "Total number of " #a_Lower " intructions with status flag updating", \
759 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
760 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
761 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
762 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
763 "Postponed all status flag updating, " #a_Lower " instructions, percentage", \
764 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "PostponedPct", idCpu); \
765 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
766 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
767 "Skipped all status flag updating, " #a_Lower " instructions, percentage", \
768 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "SkippedPct", idCpu); \
769 } while (0)
770 REG_NATIVE_EFL_GROUP(arithmetic, Arithmetic);
771 REG_NATIVE_EFL_GROUP(logical, Logical);
772 REG_NATIVE_EFL_GROUP(shift, Shift);
773# undef REG_NATIVE_EFL_GROUP
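/*
 * For reference, each REG_NATIVE_EFL_GROUP(lower, Camel) invocation above
 * expands to five registrations under /IEM/CPU%u/re/NativeEFlags/, e.g. for
 * REG_NATIVE_EFL_GROUP(arithmetic, Arithmetic):
 *
 *     ArithmeticPostponed      counter of postponed EFLAGS updates
 *     ArithmeticSkipped        counter of skipped EFLAGS updates
 *     ArithmeticTotal          total arithmetic instructions w/ EFLAGS updates
 *     ArithmeticPostponedPct   Postponed as a percentage of Total
 *     ArithmeticSkippedPct     Skipped as a percentage of Total
 */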
774
775 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponedEmits, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
776 "Postponed EFLAGS calculation emits", "/IEM/CPU%u/re/NativeEFlags/ZZEmits", idCpu);
777
778 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
779 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippable", idCpu);
780 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippable", idCpu);
781 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippable", idCpu);
782 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippable", idCpu);
783 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippable", idCpu);
784
785 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfRequired", idCpu);
786 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfRequired", idCpu);
787 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfRequired", idCpu);
788 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfRequired", idCpu);
789 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfRequired", idCpu);
790 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfRequired", idCpu);
791
792# ifdef IEMLIVENESS_EXTENDED_LAYOUT
793 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfDelayable", idCpu);
794 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfDelayable", idCpu);
795 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfDelayable", idCpu);
796 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfDelayable", idCpu);
797 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfDelayable", idCpu);
798 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfDelayable", idCpu);
799# endif
800
801 /* Sum up all status bits ('_' is a sorting hack). */
802 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fSkippable*", idCpu);
803 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total skippable EFLAGS status bit updating",
804 "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);
805
806 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fRequired*", idCpu);
807 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total required EFLAGS status bit updating",
808 "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);
809
810# ifdef IEMLIVENESS_EXTENDED_LAYOUT
811 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fDelayable*", idCpu);
812 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total potentially delayable EFLAGS status bit updating",
813 "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
814# endif
815
816 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?f*", idCpu);
817 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total EFLAGS status bit events of any kind",
818 "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
819
820 /* Corresponding ratios / percentages of the totals. */
821 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
822 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);
823 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
824 "Total skippable EFLAGS status bit updating percentage",
825 "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippablePct", idCpu);
826
827 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
828 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);
829 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
830 "Total required EFLAGS status bit updating percentage",
831 "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequiredPct", idCpu);
832
833# ifdef IEMLIVENESS_EXTENDED_LAYOUT
834 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
835 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
836 "Total potentially delayable EFLAGS status bit updating percentage",
837 "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayablePct", idCpu);
838# endif
839
840 /* Ratios of individual bits. */
841 size_t const offFlagChar = RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/Cf*", idCpu) - 3;
842 Assert(szPat[offFlagChar] == 'C');
843 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
844 Assert(szVal[offFlagChar] == 'C');
845 szPat[offFlagChar] = szVal[offFlagChar] = 'C'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.CF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippablePct", idCpu);
846 szPat[offFlagChar] = szVal[offFlagChar] = 'P'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.PF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippablePct", idCpu);
847 szPat[offFlagChar] = szVal[offFlagChar] = 'A'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.AF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippablePct", idCpu);
848 szPat[offFlagChar] = szVal[offFlagChar] = 'Z'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.ZF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippablePct", idCpu);
849 szPat[offFlagChar] = szVal[offFlagChar] = 'S'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.SF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippablePct", idCpu);
850 szPat[offFlagChar] = szVal[offFlagChar] = 'O'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.OF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippablePct", idCpu);
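
        /* Illustrative sketch of the derived-statistics pattern used above; the
           sample counters and paths below are hypothetical, only the API usage
           mirrors the registrations in this function: plain counters are put
           under a common folder, then a wildcard pattern feeds STAMR3RegisterSum
           and STAMR3RegisterPctOfSum to produce the aggregate and ratio nodes.

              static STAMCOUNTER s_aExample[2];
              STAMR3RegisterF(pVM, &s_aExample[0], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                              "Example hits",   "/IEM/CPU%u/example/XfHits",   idCpu);
              STAMR3RegisterF(pVM, &s_aExample[1], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                              "Example misses", "/IEM/CPU%u/example/XfMisses", idCpu);
              RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/example/Xf*", idCpu);
              STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Hits + misses",
                                "/IEM/CPU%u/example/total", idCpu);
              RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/example/XfHits", idCpu);
              STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
                                     "Hit percentage", "/IEM/CPU%u/example/hitPct", idCpu);
        */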
851
852 /* PC updates total and skipped, with PCT ratio. */
853 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates", "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
854 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
855 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
856 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
857 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
858 "Delayed RIP updating percentage",
859 "/IEM/CPU%u/re/NativePcUpdateDelayed_StatusDelayedPct", idCpu);
860
861# endif /* VBOX_WITH_STATISTICS */
862# ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
863 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEndIfOtherBranchDirty, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
864 "IEM_MC_ENDIF flushing dirty shadow registers for other branch (not good).",
865 "/IEM/CPU%u/re/NativeEndIfOtherBranchDirty", idCpu);
866# endif
867# ifdef VBOX_WITH_STATISTICS
868 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
869 "Number of calls to iemNativeSimdRegAllocFindFree.",
870 "/IEM/CPU%u/re/NativeSimdRegFindFree", idCpu);
871 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
872 "Number of times iemNativeSimdRegAllocFindFree needed to free a variable.",
873 "/IEM/CPU%u/re/NativeSimdRegFindFreeVar", idCpu);
874 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
875                            "Number of times iemNativeSimdRegAllocFindFree did not need to free any variables.",
876 "/IEM/CPU%u/re/NativeSimdRegFindFreeNoVar", idCpu);
877 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
878                            "Times liveness info freed up shadowed guest registers in iemNativeSimdRegAllocFindFree.",
879 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessUnshadowed", idCpu);
880 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
881 "Times liveness info helped finding the return register in iemNativeSimdRegAllocFindFree.",
882 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessHelped", idCpu);
883
884 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks",
885 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckPotential", idCpu);
886 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks",
887 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckPotential", idCpu);
888 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks",
889 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckPotential", idCpu);
890 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks",
891 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckPotential", idCpu);
892
893 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted",
894 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckOmitted", idCpu);
895 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted",
896 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckOmitted", idCpu);
897 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted",
898 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckOmitted", idCpu);
899 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted",
900 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckOmitted", idCpu);
901
902 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbFinished, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
903 "Number of times the TB finishes execution completely",
904 "/IEM/CPU%u/re/NativeTbFinished", idCpu);
905# endif /* VBOX_WITH_STATISTICS */
906 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreak, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
907 "Number of times the TB finished through the ReturnBreak label",
908 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak", idCpu);
909 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreakFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
910                            "Number of times the TB finished through the ReturnBreakFF label",
911 "/IEM/CPU%u/re/NativeTbExit/ReturnBreakFF", idCpu);
912 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnWithFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
913 "Number of times the TB finished through the ReturnWithFlags label",
914 "/IEM/CPU%u/re/NativeTbExit/ReturnWithFlags", idCpu);
915 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnOtherStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
916 "Number of times the TB finished with some other status value",
917 "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus", idCpu);
918 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLongJump, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
919 "Number of times the TB finished via long jump / throw",
920 "/IEM/CPU%u/re/NativeTbExit/LongJumps", idCpu);
921 /* These end up returning VINF_IEM_REEXEC_BREAK and are thus already counted under NativeTbExit/ReturnBreak: */
922 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitObsoleteTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
923 "Number of times the TB finished through the ObsoleteTb label",
924 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/ObsoleteTb", idCpu);
925 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
926 "Number of times the TB finished through the NeedCsLimChecking label",
927 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/NeedCsLimChecking", idCpu);
928 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
929 "Number of times the TB finished through the CheckBranchMiss label",
930 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/CheckBranchMiss", idCpu);
931 /* Raising stuff will either increment NativeTbExit/LongJumps or NativeTbExit/ReturnOtherStatus
932 depending on whether VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is defined: */
933# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
934# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus/"
935# else
936# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/LongJumps/"
937# endif
938 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseDe, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
939 "Number of times the TB finished raising a #DE exception",
940 RAISE_PREFIX "RaiseDe", idCpu);
941 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
942 "Number of times the TB finished raising a #UD exception",
943 RAISE_PREFIX "RaiseUd", idCpu);
944 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
945                            "Number of times the TB finished raising an SSE related exception",
946 RAISE_PREFIX "RaiseSseRelated", idCpu);
947 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseAvxRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
948                            "Number of times the TB finished raising an AVX related exception",
949 RAISE_PREFIX "RaiseAvxRelated", idCpu);
950 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseAvxFpRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
951                            "Number of times the TB finished raising an SSE/AVX floating point related exception",
952 RAISE_PREFIX "RaiseSseAvxFpRelated", idCpu);
953 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseNm, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
954 "Number of times the TB finished raising a #NM exception",
955 RAISE_PREFIX "RaiseNm", idCpu);
956 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseGp0, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
957 "Number of times the TB finished raising a #GP(0) exception",
958 RAISE_PREFIX "RaiseGp0", idCpu);
959 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseMf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
960 "Number of times the TB finished raising a #MF exception",
961 RAISE_PREFIX "RaiseMf", idCpu);
962 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseXf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
963 "Number of times the TB finished raising a #XF exception",
964 RAISE_PREFIX "RaiseXf", idCpu);
965
966# ifdef VBOX_WITH_STATISTICS
967 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLoopFullTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
968 "Number of full TB loops.",
969 "/IEM/CPU%u/re/NativeTbExit/LoopFullTb", idCpu);
970# endif
971
972 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
973 "Direct linking #1 with IRQ check succeeded",
974 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1Irq", idCpu);
975 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
976 "Direct linking #1 w/o IRQ check succeeded",
977 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1NoIrq", idCpu);
978# ifdef VBOX_WITH_STATISTICS
979 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
980 "Direct linking #1 failed: No TB in lookup table",
981 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1NoTb", idCpu);
982 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
983 "Direct linking #1 failed: GCPhysPc mismatch",
984 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchGCPhysPc", idCpu);
985 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
986 "Direct linking #1 failed: TB flags mismatch",
987 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchFlags", idCpu);
988 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
989 "Direct linking #1 failed: IRQ or FF pending",
990 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1PendingIrq", idCpu);
991# endif
992
993 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
994 "Direct linking #2 with IRQ check succeeded",
995 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2Irq", idCpu);
996 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
997 "Direct linking #2 w/o IRQ check succeeded",
998 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2NoIrq", idCpu);
999# ifdef VBOX_WITH_STATISTICS
1000 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1001 "Direct linking #2 failed: No TB in lookup table",
1002 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2NoTb", idCpu);
1003 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1004 "Direct linking #2 failed: GCPhysPc mismatch",
1005 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchGCPhysPc", idCpu);
1006 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1007 "Direct linking #2 failed: TB flags mismatch",
1008 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchFlags", idCpu);
1009 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1010 "Direct linking #2 failed: IRQ or FF pending",
1011 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2PendingIrq", idCpu);
1012# endif
1013
1014 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeTbExit/*", idCpu); /* only immediate children, no sub folders */
1015 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
1016 "Number of times native TB execution finished before the end (not counting thrown memory++ exceptions)",
1017 "/IEM/CPU%u/re/NativeTbExit", idCpu);
1018
1019
1020# endif /* VBOX_WITH_IEM_NATIVE_RECOMPILER */
1021
1022
1023# ifdef VBOX_WITH_STATISTICS
1024 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1025 "iemMemMapJmp calls", "/IEM/CPU%u/iemMemMapJmp", idCpu);
1026 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapNoJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1027 "iemMemMap calls", "/IEM/CPU%u/iemMemMapNoJmp", idCpu);
1028 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferCrossPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1029 "iemMemBounceBufferMapCrossPage calls", "/IEM/CPU%u/iemMemMapBounceBufferCrossPage", idCpu);
1030 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferMapPhys, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1031 "iemMemBounceBufferMapPhys calls", "/IEM/CPU%u/iemMemMapBounceBufferMapPhys", idCpu);
1032# endif
1033
1034
1035#endif /* VBOX_WITH_IEM_RECOMPILER */
1036
1037 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
1038 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1039 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
1040 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
1041 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1042 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
1043
1044# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
1045 /* Instruction statistics: */
1046# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
1047 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1048 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
1049 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1050 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
1051# include "IEMInstructionStatisticsTmpl.h"
1052# undef IEM_DO_INSTR_STAT
1053# endif
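
    /* Illustrative note on the block above: IEMInstructionStatisticsTmpl.h is an
       X-macro list, expected to consist purely of IEM_DO_INSTR_STAT(a_Name, a_szDesc)
       invocations, so every inclusion site picks its own expansion of the same
       instruction list.  A minimal hypothetical sketch of the same idea:

          #define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t cExample##a_Name;
          #include "HypotheticalInstrListTmpl.h"   // e.g. IEM_DO_INSTR_STAT(Add, "add instruction")
          #undef  IEM_DO_INSTR_STAT
     */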
1054
1055# if defined(VBOX_WITH_STATISTICS) && defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1056 /* Threaded function statistics: */
1057 for (unsigned i = 1; i < (unsigned)kIemThreadedFunc_End; i++)
1058 STAMR3RegisterF(pVM, &pVCpu->iem.s.acThreadedFuncStats[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED,
1059 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/ThrdFuncs/%s", idCpu, g_apszIemThreadedFunctionStats[i]);
1060# endif
1061
1062
1063 for (unsigned i = 1; i < RT_ELEMENTS(pVCpu->iem.s.aStatAdHoc); i++)
1064 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatAdHoc[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
1065 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/AdHoc/%02u", idCpu, i);
1066
1067#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
1068 }
1069
1070#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1071 /*
1072 * Register the per-VM VMX APIC-access page handler type.
1073 */
1074 if (pVM->cpum.ro.GuestFeatures.fVmx)
1075 {
1076 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
1077 iemVmxApicAccessPageHandler,
1078 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
1079 AssertLogRelRCReturn(rc, rc);
1080 }
1081#endif
1082
1083 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1084    DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM data TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1085#ifdef IEM_WITH_TLB_TRACE
1086 DBGFR3InfoRegisterInternalArgv(pVM, "tlbtrace", "IEM TLB trace log", iemR3InfoTlbTrace, DBGFINFO_FLAGS_RUN_ON_EMT);
1087#endif
1088#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1089 DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
1090 DBGFR3InfoRegisterInternalArgv(pVM, "tbtop", "IEM translation blocks most used or most recently used",
1091 iemR3InfoTbTop, DBGFINFO_FLAGS_RUN_ON_EMT);
1092#endif
1093#ifdef VBOX_WITH_DEBUGGER
1094 iemR3RegisterDebuggerCommands();
1095#endif
1096
1097 return VINF_SUCCESS;
1098}
1099
1100
1101VMMR3_INT_DECL(int) IEMR3Term(PVM pVM)
1102{
1103 NOREF(pVM);
1104#ifdef IEM_WITH_TLB_TRACE
1105 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1106 {
1107 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
1108 RTMemPageFree(pVCpu->iem.s.paTlbTraceEntries,
1109 RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift) * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
1110 }
1111#endif
1112#if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER) && defined(VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING)
1113 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1114 iemThreadedSaveTbForProfilingCleanup(pVM->apCpusR3[idCpu]);
1115#endif
1116 return VINF_SUCCESS;
1117}
1118
1119
1120VMMR3_INT_DECL(void) IEMR3Relocate(PVM pVM)
1121{
1122 RT_NOREF(pVM);
1123}
1124
1125
1126/**
1127 * Gets the name of a generic IEM exit code.
1128 *
1129 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
1130 * @param uExit The IEM exit to name.
1131 */
1132VMMR3_INT_DECL(const char *) IEMR3GetExitName(uint32_t uExit)
1133{
1134 static const char * const s_apszNames[] =
1135 {
1136 /* external interrupts */
1137 "ExtInt 00h", "ExtInt 01h", "ExtInt 02h", "ExtInt 03h", "ExtInt 04h", "ExtInt 05h", "ExtInt 06h", "ExtInt 07h",
1138 "ExtInt 08h", "ExtInt 09h", "ExtInt 0ah", "ExtInt 0bh", "ExtInt 0ch", "ExtInt 0dh", "ExtInt 0eh", "ExtInt 0fh",
1139 "ExtInt 10h", "ExtInt 11h", "ExtInt 12h", "ExtInt 13h", "ExtInt 14h", "ExtInt 15h", "ExtInt 16h", "ExtInt 17h",
1140 "ExtInt 18h", "ExtInt 19h", "ExtInt 1ah", "ExtInt 1bh", "ExtInt 1ch", "ExtInt 1dh", "ExtInt 1eh", "ExtInt 1fh",
1141 "ExtInt 20h", "ExtInt 21h", "ExtInt 22h", "ExtInt 23h", "ExtInt 24h", "ExtInt 25h", "ExtInt 26h", "ExtInt 27h",
1142 "ExtInt 28h", "ExtInt 29h", "ExtInt 2ah", "ExtInt 2bh", "ExtInt 2ch", "ExtInt 2dh", "ExtInt 2eh", "ExtInt 2fh",
1143 "ExtInt 30h", "ExtInt 31h", "ExtInt 32h", "ExtInt 33h", "ExtInt 34h", "ExtInt 35h", "ExtInt 36h", "ExtInt 37h",
1144 "ExtInt 38h", "ExtInt 39h", "ExtInt 3ah", "ExtInt 3bh", "ExtInt 3ch", "ExtInt 3dh", "ExtInt 3eh", "ExtInt 3fh",
1145 "ExtInt 40h", "ExtInt 41h", "ExtInt 42h", "ExtInt 43h", "ExtInt 44h", "ExtInt 45h", "ExtInt 46h", "ExtInt 47h",
1146 "ExtInt 48h", "ExtInt 49h", "ExtInt 4ah", "ExtInt 4bh", "ExtInt 4ch", "ExtInt 4dh", "ExtInt 4eh", "ExtInt 4fh",
1147 "ExtInt 50h", "ExtInt 51h", "ExtInt 52h", "ExtInt 53h", "ExtInt 54h", "ExtInt 55h", "ExtInt 56h", "ExtInt 57h",
1148 "ExtInt 58h", "ExtInt 59h", "ExtInt 5ah", "ExtInt 5bh", "ExtInt 5ch", "ExtInt 5dh", "ExtInt 5eh", "ExtInt 5fh",
1149 "ExtInt 60h", "ExtInt 61h", "ExtInt 62h", "ExtInt 63h", "ExtInt 64h", "ExtInt 65h", "ExtInt 66h", "ExtInt 67h",
1150 "ExtInt 68h", "ExtInt 69h", "ExtInt 6ah", "ExtInt 6bh", "ExtInt 6ch", "ExtInt 6dh", "ExtInt 6eh", "ExtInt 6fh",
1151 "ExtInt 70h", "ExtInt 71h", "ExtInt 72h", "ExtInt 73h", "ExtInt 74h", "ExtInt 75h", "ExtInt 76h", "ExtInt 77h",
1152 "ExtInt 78h", "ExtInt 79h", "ExtInt 7ah", "ExtInt 7bh", "ExtInt 7ch", "ExtInt 7dh", "ExtInt 7eh", "ExtInt 7fh",
1153 "ExtInt 80h", "ExtInt 81h", "ExtInt 82h", "ExtInt 83h", "ExtInt 84h", "ExtInt 85h", "ExtInt 86h", "ExtInt 87h",
1154 "ExtInt 88h", "ExtInt 89h", "ExtInt 8ah", "ExtInt 8bh", "ExtInt 8ch", "ExtInt 8dh", "ExtInt 8eh", "ExtInt 8fh",
1155 "ExtInt 90h", "ExtInt 91h", "ExtInt 92h", "ExtInt 93h", "ExtInt 94h", "ExtInt 95h", "ExtInt 96h", "ExtInt 97h",
1156 "ExtInt 98h", "ExtInt 99h", "ExtInt 9ah", "ExtInt 9bh", "ExtInt 9ch", "ExtInt 9dh", "ExtInt 9eh", "ExtInt 9fh",
1157 "ExtInt a0h", "ExtInt a1h", "ExtInt a2h", "ExtInt a3h", "ExtInt a4h", "ExtInt a5h", "ExtInt a6h", "ExtInt a7h",
1158 "ExtInt a8h", "ExtInt a9h", "ExtInt aah", "ExtInt abh", "ExtInt ach", "ExtInt adh", "ExtInt aeh", "ExtInt afh",
1159 "ExtInt b0h", "ExtInt b1h", "ExtInt b2h", "ExtInt b3h", "ExtInt b4h", "ExtInt b5h", "ExtInt b6h", "ExtInt b7h",
1160 "ExtInt b8h", "ExtInt b9h", "ExtInt bah", "ExtInt bbh", "ExtInt bch", "ExtInt bdh", "ExtInt beh", "ExtInt bfh",
1161 "ExtInt c0h", "ExtInt c1h", "ExtInt c2h", "ExtInt c3h", "ExtInt c4h", "ExtInt c5h", "ExtInt c6h", "ExtInt c7h",
1162 "ExtInt c8h", "ExtInt c9h", "ExtInt cah", "ExtInt cbh", "ExtInt cch", "ExtInt cdh", "ExtInt ceh", "ExtInt cfh",
1163 "ExtInt d0h", "ExtInt d1h", "ExtInt d2h", "ExtInt d3h", "ExtInt d4h", "ExtInt d5h", "ExtInt d6h", "ExtInt d7h",
1164 "ExtInt d8h", "ExtInt d9h", "ExtInt dah", "ExtInt dbh", "ExtInt dch", "ExtInt ddh", "ExtInt deh", "ExtInt dfh",
1165 "ExtInt e0h", "ExtInt e1h", "ExtInt e2h", "ExtInt e3h", "ExtInt e4h", "ExtInt e5h", "ExtInt e6h", "ExtInt e7h",
1166 "ExtInt e8h", "ExtInt e9h", "ExtInt eah", "ExtInt ebh", "ExtInt ech", "ExtInt edh", "ExtInt eeh", "ExtInt efh",
1167 "ExtInt f0h", "ExtInt f1h", "ExtInt f2h", "ExtInt f3h", "ExtInt f4h", "ExtInt f5h", "ExtInt f6h", "ExtInt f7h",
1168 "ExtInt f8h", "ExtInt f9h", "ExtInt fah", "ExtInt fbh", "ExtInt fch", "ExtInt fdh", "ExtInt feh", "ExtInt ffh",
1169        /* software interrupts */
1170 "SoftInt 00h", "SoftInt 01h", "SoftInt 02h", "SoftInt 03h", "SoftInt 04h", "SoftInt 05h", "SoftInt 06h", "SoftInt 07h",
1171 "SoftInt 08h", "SoftInt 09h", "SoftInt 0ah", "SoftInt 0bh", "SoftInt 0ch", "SoftInt 0dh", "SoftInt 0eh", "SoftInt 0fh",
1172 "SoftInt 10h", "SoftInt 11h", "SoftInt 12h", "SoftInt 13h", "SoftInt 14h", "SoftInt 15h", "SoftInt 16h", "SoftInt 17h",
1173 "SoftInt 18h", "SoftInt 19h", "SoftInt 1ah", "SoftInt 1bh", "SoftInt 1ch", "SoftInt 1dh", "SoftInt 1eh", "SoftInt 1fh",
1174 "SoftInt 20h", "SoftInt 21h", "SoftInt 22h", "SoftInt 23h", "SoftInt 24h", "SoftInt 25h", "SoftInt 26h", "SoftInt 27h",
1175 "SoftInt 28h", "SoftInt 29h", "SoftInt 2ah", "SoftInt 2bh", "SoftInt 2ch", "SoftInt 2dh", "SoftInt 2eh", "SoftInt 2fh",
1176 "SoftInt 30h", "SoftInt 31h", "SoftInt 32h", "SoftInt 33h", "SoftInt 34h", "SoftInt 35h", "SoftInt 36h", "SoftInt 37h",
1177 "SoftInt 38h", "SoftInt 39h", "SoftInt 3ah", "SoftInt 3bh", "SoftInt 3ch", "SoftInt 3dh", "SoftInt 3eh", "SoftInt 3fh",
1178 "SoftInt 40h", "SoftInt 41h", "SoftInt 42h", "SoftInt 43h", "SoftInt 44h", "SoftInt 45h", "SoftInt 46h", "SoftInt 47h",
1179 "SoftInt 48h", "SoftInt 49h", "SoftInt 4ah", "SoftInt 4bh", "SoftInt 4ch", "SoftInt 4dh", "SoftInt 4eh", "SoftInt 4fh",
1180 "SoftInt 50h", "SoftInt 51h", "SoftInt 52h", "SoftInt 53h", "SoftInt 54h", "SoftInt 55h", "SoftInt 56h", "SoftInt 57h",
1181 "SoftInt 58h", "SoftInt 59h", "SoftInt 5ah", "SoftInt 5bh", "SoftInt 5ch", "SoftInt 5dh", "SoftInt 5eh", "SoftInt 5fh",
1182 "SoftInt 60h", "SoftInt 61h", "SoftInt 62h", "SoftInt 63h", "SoftInt 64h", "SoftInt 65h", "SoftInt 66h", "SoftInt 67h",
1183 "SoftInt 68h", "SoftInt 69h", "SoftInt 6ah", "SoftInt 6bh", "SoftInt 6ch", "SoftInt 6dh", "SoftInt 6eh", "SoftInt 6fh",
1184 "SoftInt 70h", "SoftInt 71h", "SoftInt 72h", "SoftInt 73h", "SoftInt 74h", "SoftInt 75h", "SoftInt 76h", "SoftInt 77h",
1185 "SoftInt 78h", "SoftInt 79h", "SoftInt 7ah", "SoftInt 7bh", "SoftInt 7ch", "SoftInt 7dh", "SoftInt 7eh", "SoftInt 7fh",
1186 "SoftInt 80h", "SoftInt 81h", "SoftInt 82h", "SoftInt 83h", "SoftInt 84h", "SoftInt 85h", "SoftInt 86h", "SoftInt 87h",
1187 "SoftInt 88h", "SoftInt 89h", "SoftInt 8ah", "SoftInt 8bh", "SoftInt 8ch", "SoftInt 8dh", "SoftInt 8eh", "SoftInt 8fh",
1188 "SoftInt 90h", "SoftInt 91h", "SoftInt 92h", "SoftInt 93h", "SoftInt 94h", "SoftInt 95h", "SoftInt 96h", "SoftInt 97h",
1189 "SoftInt 98h", "SoftInt 99h", "SoftInt 9ah", "SoftInt 9bh", "SoftInt 9ch", "SoftInt 9dh", "SoftInt 9eh", "SoftInt 9fh",
1190 "SoftInt a0h", "SoftInt a1h", "SoftInt a2h", "SoftInt a3h", "SoftInt a4h", "SoftInt a5h", "SoftInt a6h", "SoftInt a7h",
1191 "SoftInt a8h", "SoftInt a9h", "SoftInt aah", "SoftInt abh", "SoftInt ach", "SoftInt adh", "SoftInt aeh", "SoftInt afh",
1192 "SoftInt b0h", "SoftInt b1h", "SoftInt b2h", "SoftInt b3h", "SoftInt b4h", "SoftInt b5h", "SoftInt b6h", "SoftInt b7h",
1193 "SoftInt b8h", "SoftInt b9h", "SoftInt bah", "SoftInt bbh", "SoftInt bch", "SoftInt bdh", "SoftInt beh", "SoftInt bfh",
1194 "SoftInt c0h", "SoftInt c1h", "SoftInt c2h", "SoftInt c3h", "SoftInt c4h", "SoftInt c5h", "SoftInt c6h", "SoftInt c7h",
1195 "SoftInt c8h", "SoftInt c9h", "SoftInt cah", "SoftInt cbh", "SoftInt cch", "SoftInt cdh", "SoftInt ceh", "SoftInt cfh",
1196 "SoftInt d0h", "SoftInt d1h", "SoftInt d2h", "SoftInt d3h", "SoftInt d4h", "SoftInt d5h", "SoftInt d6h", "SoftInt d7h",
1197 "SoftInt d8h", "SoftInt d9h", "SoftInt dah", "SoftInt dbh", "SoftInt dch", "SoftInt ddh", "SoftInt deh", "SoftInt dfh",
1198 "SoftInt e0h", "SoftInt e1h", "SoftInt e2h", "SoftInt e3h", "SoftInt e4h", "SoftInt e5h", "SoftInt e6h", "SoftInt e7h",
1199 "SoftInt e8h", "SoftInt e9h", "SoftInt eah", "SoftInt ebh", "SoftInt ech", "SoftInt edh", "SoftInt eeh", "SoftInt efh",
1200 "SoftInt f0h", "SoftInt f1h", "SoftInt f2h", "SoftInt f3h", "SoftInt f4h", "SoftInt f5h", "SoftInt f6h", "SoftInt f7h",
1201 "SoftInt f8h", "SoftInt f9h", "SoftInt fah", "SoftInt fbh", "SoftInt fch", "SoftInt fdh", "SoftInt feh", "SoftInt ffh",
1202 };
1203 if (uExit < RT_ELEMENTS(s_apszNames))
1204 return s_apszNames[uExit];
1205 return NULL;
1206}
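

/*
 * A minimal usage sketch for IEMR3GetExitName (the logging context is made up,
 * only the function and its NULL-on-unknown contract come from the code above):
 *
 *      uint32_t const uExit   = ...;   // some generic IEM exit code
 *      const char    *pszName = IEMR3GetExitName(uExit);
 *      LogRel(("IEM exit %#x (%s)\n", uExit, pszName ? pszName : "unknown"));
 */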
1207
1208
1209/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1210static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
1211{
1212 if (*pfHeader)
1213 return;
1214 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
1215 *pfHeader = true;
1216}
1217
1218
1219#define IEMR3INFOTLB_F_ONLY_VALID RT_BIT_32(0)
1220#define IEMR3INFOTLB_F_CHECK RT_BIT_32(1)
1221
1222/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1223static void iemR3InfoTlbPrintSlot(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe,
1224 uint32_t uSlot, uint32_t fFlags)
1225{
1226#ifndef VBOX_VMM_TARGET_ARMV8
1227 uint64_t const uTlbRevision = !(uSlot & 1) ? pTlb->uTlbRevision : pTlb->uTlbRevisionGlobal;
1228#else
1229 uint64_t const uTlbRevision = pTlb->uTlbRevision;
1230#endif
1231 if ((fFlags & IEMR3INFOTLB_F_ONLY_VALID) && (pTlbe->uTag & IEMTLB_REVISION_MASK) != uTlbRevision)
1232 return;
1233
1234    /* The address needs to be sign extended, thus the shifting fun here. */
1235 RTGCPTR const GCPtr = (RTGCINTPTR)((pTlbe->uTag & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1236 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT);
1237 const char *pszValid = "";
1238#ifndef VBOX_VMM_TARGET_ARMV8
1239 char szTmp[128];
1240 if (fFlags & IEMR3INFOTLB_F_CHECK)
1241 {
1242#ifdef VBOX_VMM_TARGET_X86
1243 uint32_t const fInvSlotG = (uint32_t)!(uSlot & 1) << X86_PTE_BIT_G;
1244#endif
1245 PGMPTWALKFAST WalkFast;
1246 int rc = PGMGstQueryPageFast(pVCpu, GCPtr, 0 /*fFlags - don't check or modify anything */, &WalkFast);
1247 pszValid = szTmp;
1248 if (RT_FAILURE(rc))
1249 switch (rc)
1250 {
1251 case VERR_PAGE_TABLE_NOT_PRESENT:
1252#ifdef VBOX_VMM_TARGET_X86
1253 switch ((WalkFast.fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT)
1254 {
1255 case 1: pszValid = " stale(page-not-present)"; break;
1256 case 2: pszValid = " stale(pd-entry-not-present)"; break;
1257 case 3: pszValid = " stale(pdptr-entry-not-present)"; break;
1258 case 4: pszValid = " stale(pml4-entry-not-present)"; break;
1259 case 5: pszValid = " stale(pml5-entry-not-present)"; break;
1260 default: pszValid = " stale(VERR_PAGE_TABLE_NOT_PRESENT)"; break;
1261 }
1262 break;
1263#else
1264 RT_FALL_THRU(); /** @todo */
1265#endif
1266 default: RTStrPrintf(szTmp, sizeof(szTmp), " stale(rc=%d)", rc); break;
1267 }
1268 else if (WalkFast.GCPhys != pTlbe->GCPhys)
1269 RTStrPrintf(szTmp, sizeof(szTmp), " stale(GCPhys=%RGp)", WalkFast.GCPhys);
1270#ifdef VBOX_VMM_TARGET_X86
1271 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G | X86_PTE_A | X86_PTE_D))
1272 == ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1273 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED))
1274 | fInvSlotG ) )
1275 pszValid = " still-valid";
1276 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G))
1277 == ((pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER)) | fInvSlotG) )
1278 switch ( (~WalkFast.fEffective & (X86_PTE_A | X86_PTE_D))
1279 ^ (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED)) )
1280 {
1281 case X86_PTE_A:
1282 pszValid = WalkFast.fEffective & X86_PTE_A ? " still-valid(accessed-now)" : " still-valid(accessed-no-more)";
1283 break;
1284 case X86_PTE_D:
1285 pszValid = WalkFast.fEffective & X86_PTE_D ? " still-valid(dirty-now)" : " still-valid(dirty-no-more)";
1286 break;
1287 case X86_PTE_D | X86_PTE_A:
1288 RTStrPrintf(szTmp, sizeof(szTmp), " still-valid(%s%s)",
1289 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1290 : WalkFast.fEffective & X86_PTE_D ? "dirty-now" : "dirty-no-more",
1291 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1292 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1293 break;
1294 default: AssertFailed(); break;
1295 }
1296 else
1297 RTStrPrintf(szTmp, sizeof(szTmp), " stale(%s%s%s%s%s)",
1298 (~WalkFast.fEffective & X86_PTE_RW) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) ? ""
1299                        : WalkFast.fEffective & X86_PTE_RW ? "writable-now" : "writable-no-more",
1300 (~WalkFast.fEffective & X86_PTE_US) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) ? ""
1301 : WalkFast.fEffective & X86_PTE_US ? " user-now" : " user-no-more",
1302 (~WalkFast.fEffective & X86_PTE_G) == fInvSlotG ? ""
1303 : WalkFast.fEffective & X86_PTE_G ? " global-now" : " global-no-more",
1304 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1305 : WalkFast.fEffective & X86_PTE_D ? " dirty-now" : " dirty-no-more",
1306 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1307 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1308#elif defined(VBOX_VMM_TARGET_ARMV8)
1309 else
1310 RTStrPrintf(szTmp, sizeof(szTmp), " stale(todo)");
1311#else
1312# error "port me"
1313#endif
1314 }
1315#else
1316 RT_NOREF(pVCpu);
1317#endif
1318
1319#ifdef VBOX_VMM_TARGET_X86
1320 pHlp->pfnPrintf(pHlp, IEMTLB_SLOT_FMT ": %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s%s%s%s/%s%s%s%s/%s %s%s\n",
1321 uSlot,
1322 (pTlbe->uTag & IEMTLB_REVISION_MASK) == uTlbRevision ? "valid "
1323 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
1324 : "expired",
1325 GCPtr, /* -> */
1326 pTlbe->GCPhys, /* / */ pTlbe->pbMappingR3,
1327 /* / */
1328 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
1329 /* */
1330 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "R-" : "RW",
1331 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "-" : "X",
1332 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
1333 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
1334 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER ? "U" : "S",
1335 !(uSlot & 1) ? "-" : "G",
1336                    pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE ? "2M" : "4K",
1337 /* / */
1338 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
1339 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
1340 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "u" : "-",
1341 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_CODE_PAGE ? "c" : "-",
1342 /* / */
1343 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "N" : "M",
1344 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
1345 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired",
1346 pszValid);
1347#elif defined(VBOX_VMM_TARGET_ARMV8)
1348 static const char * const s_apszLimAndTopLevelX[] =
1349 { /*0bxyz: z=IEMTLBE_F_S2_NO_LIM_WRITE y=IEMTLBE_F_S2_TL0 x=IEMTLBE_F_S2_TL1 */
1350 /*0b000:*/ "Lw",
1351 /*0b001:*/ "",
1352 /*0b010:*/ "LwTL0",
1353 /*0b011:*/ "!TL0!",
1354 /*0b100:*/ "LwTL1",
1355 /*0b101:*/ "!TL1!",
1356 /*0b110:*/ "LwTL01", /* See MRO-TL01 */
1357 /*0b111:*/ "!TL01!",
1358 };
1359 static const char * const s_apszSizes[] = { "L3", "L2", "L1", "L0" };
1360 AssertCompile(((IEMTLBE_F_S2_NO_LIM_WRITE | IEMTLBE_F_S2_TL0 | IEMTLBE_F_S2_TL1) >> IEMTLBE_F_S2_NO_LIM_WRITE_BIT) == 7);
1361 pHlp->pfnPrintf(pHlp, IEMTLB_SLOT_FMT
1362 ": %s %#018RX64 -> %RGp / %p / %#05x U%c%c%c%cP%c%c%c%c%c%c%c/%c%c%c/%s/%c%c%c%c/%c as:%x vm:%x/%s %s%s\n",
1363 uSlot,
1364 (pTlbe->uTag & IEMTLB_REVISION_MASK) == uTlbRevision ? "valid "
1365 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
1366 : "expired",
1367 GCPtr, /* -> */
1368 pTlbe->GCPhys, /* / */ pTlbe->pbMappingR3,
1369 /* / */
1370 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~(IEMTLBE_F_PHYS_REV | IEMTLBE_F_S1_ASID | IEMTLBE_F_S2_VMID)),
1371 /* */
1372 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_U_NO_READ ? '-' : 'r',
1373 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_U_NO_WRITE ? '-' : 'w',
1374 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_U_NO_EXEC ? '-' : 'x',
1375 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_U_NO_GCS ? '-' : 's',
1376 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_P_NO_READ ? '-' : 'r',
1377 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_P_NO_WRITE ? '-' : 'w',
1378 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_P_NO_EXEC ? '-' : 'x',
1379 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_P_NO_GCS ? '-' : 's',
1380 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_NO_DIRTY ? '-' : 'D',
1381 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_AMEC ? 'A' : '-',
1382 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_DEVICE ? 'd' : '-',
1383 /* / */
1384 !(uSlot & 1) ? '-' : 'G',
1385 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_S1_NS ? '-' : 'S',
1386 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_S1_NSE ? '-' : 'E',
1387 /* / */
1388 s_apszLimAndTopLevelX[(pTlbe->fFlagsAndPhysRev >> IEMTLBE_F_S2_NO_LIM_WRITE_BIT) & 7],
1389 /* / */
1390 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? '-' : 'r',
1391 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? '-' : 'w',
1392 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? 'u' : '-',
1393 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_CODE_PAGE ? 'c' : '-',
1394 /* / */
1395 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? 'N' : 'M',
1396 /* */
1397 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_S1_ASID) >> IEMTLBE_F_S1_ASID_SHIFT,
1398 /* */
1399 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_S2_VMID) >> IEMTLBE_F_S2_VMID_SHIFT,
1400 s_apszSizes[(pTlbe->fFlagsAndPhysRev >> IEMTLBE_F_EFF_SIZE_SHIFT) & 3],
1401 /* */
1402 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
1403 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired",
1404 pszValid);
1405#else
1406# error "port me"
1407#endif
1408}
1409
1410
1411/** Displays one or more TLB slots. */
1412static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1413 uint32_t uSlot, uint32_t cSlots, uint32_t fFlags, bool *pfHeader)
1414{
1415 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
1416 {
1417 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
1418 {
1419 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
1420 cSlots, RT_ELEMENTS(pTlb->aEntries));
1421 cSlots = RT_ELEMENTS(pTlb->aEntries);
1422 }
1423
1424 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1425 while (cSlots-- > 0)
1426 {
1427 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
1428 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &Tlbe, uSlot, fFlags);
1429 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
1430 }
1431 }
1432 else
1433 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
1434 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
1435}
1436
1437
1438/** Displays the TLB slot for the given address. */
1439static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1440 uint64_t uAddress, uint32_t fFlags, bool *pfHeader)
1441{
1442 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1443
1444 uint64_t const uTag = IEMTLB_CALC_TAG_NO_REV(pVCpu, uAddress);
1445#ifdef IEMTLB_TAG_TO_EVEN_INDEX
1446 uint32_t const uSlot = IEMTLB_TAG_TO_EVEN_INDEX(uTag);
1447#else
1448 uint32_t const uSlot = IEMTLB_TAG_TO_INDEX(uTag);
1449#endif
1450 IEMTLBENTRY const TlbeL = pTlb->aEntries[uSlot];
1451#ifndef VBOX_VMM_TARGET_ARMV8
1452 IEMTLBENTRY const TlbeG = pTlb->aEntries[uSlot + 1];
1453#endif
1454 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
1455 TlbeL.uTag == (uTag | pTlb->uTlbRevision) ? "match"
1456 : (TlbeL.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1457 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeL, uSlot, fFlags);
1458
1459#ifndef VBOX_VMM_TARGET_ARMV8
1460 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot + 1,
1461 TlbeG.uTag == (uTag | pTlb->uTlbRevisionGlobal) ? "match"
1462 : (TlbeG.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1463 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeG, uSlot + 1, fFlags);
1464#endif
1465}
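
/* Note on the even/odd pairing above: on x86 each tag maps to a slot pair, the
   even slot holding the non-global entry (checked against uTlbRevision) and the
   odd slot the global one (checked against uTlbRevisionGlobal), which is why the
   lookup prints both uSlot and uSlot + 1; the ARMv8 target has no such split. */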
1466
1467
1468/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
1469static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
1470{
1471 /*
1472 * This is entirely argument driven.
1473 */
1474 static RTGETOPTDEF const s_aOptions[] =
1475 {
1476 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1477 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1478 { "--check", 'C', RTGETOPT_REQ_NOTHING },
1479 { "all", 'A', RTGETOPT_REQ_NOTHING },
1480 { "--all", 'A', RTGETOPT_REQ_NOTHING },
1481 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1482 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
1483 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1484 { "--only-valid", 'v', RTGETOPT_REQ_NOTHING },
1485 };
1486
1487 RTGETOPTSTATE State;
1488 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1489 AssertRCReturnVoid(rc);
1490
1491 uint32_t cActionArgs = 0;
1492 bool fNeedHeader = true;
1493 bool fAddressMode = true;
1494 uint32_t fFlags = 0;
1495 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1496 PVMCPU pVCpu = pVCpuCall;
1497 if (!pVCpu)
1498 pVCpu = VMMGetCpuById(pVM, 0);
1499
1500 RTGETOPTUNION ValueUnion;
1501 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1502 {
1503 switch (rc)
1504 {
1505 case 'c':
1506 if (ValueUnion.u32 >= pVM->cCpus)
1507 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1508 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1509 {
1510 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1511 fNeedHeader = true;
1512 if (!pVCpuCall || pVCpuCall->idCpu != ValueUnion.u32)
1513 {
1514                        pHlp->pfnPrintf(pHlp, "info: Can't check guest PTs when switching to a different VCpu! Targeting %u, on %u.\n",
1515 ValueUnion.u32, pVCpuCall->idCpu);
1516 fFlags &= ~IEMR3INFOTLB_F_CHECK;
1517 }
1518 }
1519 break;
1520
1521 case 'C':
1522 if (!pVCpuCall)
1523 pHlp->pfnPrintf(pHlp, "error: Can't check guest PT when not running on an EMT!\n");
1524 else if (pVCpu != pVCpuCall)
1525                    pHlp->pfnPrintf(pHlp, "error: Can't check guest PTs when on a different EMT! Targeting %u, on %u.\n",
1526 pVCpu->idCpu, pVCpuCall->idCpu);
1527 else
1528 fFlags |= IEMR3INFOTLB_F_CHECK;
1529 break;
1530
1531 case 'a':
1532 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1533 ValueUnion.u64, fFlags, &fNeedHeader);
1534 fAddressMode = true;
1535 cActionArgs++;
1536 break;
1537
1538 case 'A':
1539 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1540 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1541 cActionArgs++;
1542 break;
1543
1544 case 'r':
1545 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1546 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, fFlags, &fNeedHeader);
1547 fAddressMode = false;
1548 cActionArgs++;
1549 break;
1550
1551 case 's':
1552 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1553 ValueUnion.u32, 1, fFlags, &fNeedHeader);
1554 fAddressMode = false;
1555 cActionArgs++;
1556 break;
1557
1558 case 'v':
1559 fFlags |= IEMR3INFOTLB_F_ONLY_VALID;
1560 break;
1561
1562 case VINF_GETOPT_NOT_OPTION:
1563 if (fAddressMode)
1564 {
1565 uint64_t uAddr;
1566 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
1567 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1568 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1569 uAddr, fFlags, &fNeedHeader);
1570 else
1571 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
1572 }
1573 else
1574 {
1575 uint32_t uSlot;
1576 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
1577 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1578 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1579 uSlot, 1, fFlags, &fNeedHeader);
1580 else
1581 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
1582 }
1583 cActionArgs++;
1584 break;
1585
1586 case 'h':
1587 pHlp->pfnPrintf(pHlp,
1588 "Usage: info %ctlb [options]\n"
1589 "\n"
1590 "Options:\n"
1591 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1592                                "    Selects the CPU whose TLBs we're looking at. Default: Caller / 0\n"
1593 " -C,--check\n"
1594 " Check valid entries against guest PTs.\n"
1595 " -A, --all, all\n"
1596 " Display all the TLB entries (default if no other args).\n"
1597 " -a<virt>, --address=<virt>\n"
1598 " Shows the TLB entry for the specified guest virtual address.\n"
1599 " -r<slot:count>, --range=<slot:count>\n"
1600 " Shows the TLB entries for the specified slot range.\n"
1601 " -s<slot>,--slot=<slot>\n"
1602 " Shows the given TLB slot.\n"
1603 " -v,--only-valid\n"
1604 " Only show valid TLB entries (TAG, not phys)\n"
1605 "\n"
1606 "Non-options are interpreted according to the last -a, -r or -s option,\n"
1607                                "defaulting to addresses if not preceded by any of those options.\n"
1608 , fITlb ? 'i' : 'd');
1609 return;
1610
1611 default:
1612 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1613 return;
1614 }
1615 }
1616
1617 /*
1618 * If no action taken, we display all (-A) by default.
1619 */
1620 if (!cActionArgs)
1621 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1622 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1623}
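

/*
 * Example debugger invocations of the handlers below, matching the options
 * parsed above (the addresses and slot numbers are made up):
 *
 *      info dtlb                        - all data TLB entries for the caller / VCPU 0
 *      info itlb -c1 -v                 - only the valid instruction TLB entries of VCPU 1
 *      info dtlb --address 0xfff00000   - the slot (pair) for a guest virtual address
 *      info dtlb -r 0x40:8 -C           - eight slots from 0x40, cross-checked against guest PTs
 */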
1624
1625
1626/**
1627 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
1628 */
1629static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1630{
1631 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
1632}
1633
1634
1635/**
1636 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
1637 */
1638static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1639{
1640 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
1641}
1642
1643
1644#ifdef IEM_WITH_TLB_TRACE
1645/**
1646 * @callback_method_impl{FNDBGFINFOARGVINT, tlbtrace}
1647 */
1648static DECLCALLBACK(void) iemR3InfoTlbTrace(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1649{
1650 /*
1651 * Parse arguments.
1652 */
1653 static RTGETOPTDEF const s_aOptions[] =
1654 {
1655 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1656 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1657 { "--last", 'l', RTGETOPT_REQ_UINT32 },
1658 { "--limit", 'l', RTGETOPT_REQ_UINT32 },
1659 { "--stop-at-global-flush", 'g', RTGETOPT_REQ_NOTHING },
1660 { "--resolve-rip", 'r', RTGETOPT_REQ_NOTHING },
1661 };
1662
1663 RTGETOPTSTATE State;
1664 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1665 AssertRCReturnVoid(rc);
1666
1667 uint32_t cLimit = UINT32_MAX;
1668 bool fStopAtGlobalFlush = false;
1669 bool fResolveRip = false;
1670 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1671 PVMCPU pVCpu = pVCpuCall;
1672 if (!pVCpu)
1673 pVCpu = VMMGetCpuById(pVM, 0);
1674
1675 RTGETOPTUNION ValueUnion;
1676 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1677 {
1678 switch (rc)
1679 {
1680 case 'c':
1681 if (ValueUnion.u32 >= pVM->cCpus)
1682 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1683 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1684 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1685 break;
1686
1687 case 'l':
1688 cLimit = ValueUnion.u32;
1689 break;
1690
1691 case 'g':
1692 fStopAtGlobalFlush = true;
1693 break;
1694
1695 case 'r':
1696 fResolveRip = true;
1697 break;
1698
1699 case 'h':
1700 pHlp->pfnPrintf(pHlp,
1701 "Usage: info tlbtrace [options] [n]\n"
1702 "\n"
1703 "Options:\n"
1704 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1705                                "    Selects the CPU whose TLB trace we're looking at. Default: Caller / 0\n"
1706 " [n], -l<n>, --last=<n>\n"
1707 " Limit display to the last N entries. Default: all\n"
1708 " -g, --stop-at-global-flush\n"
1709 " Stop after the first global flush entry.\n"
1710 " -r, --resolve-rip\n"
1711 " Resolve symbols for the flattened RIP addresses.\n"
1712 );
1713 return;
1714
1715 case VINF_GETOPT_NOT_OPTION:
1716 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cLimit);
1717 if (RT_SUCCESS(rc))
1718 break;
1719 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
1720 return;
1721
1722 default:
1723 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1724 return;
1725 }
1726 }
1727
1728 /*
1729 * Get the details.
1730 */
1731 AssertReturnVoid(pVCpu);
1732 Assert(pVCpu->iem.s.cTlbTraceEntriesShift <= 28);
1733 uint32_t idx = pVCpu->iem.s.idxTlbTraceEntry;
1734 uint32_t const cShift = RT_MIN(pVCpu->iem.s.cTlbTraceEntriesShift, 28);
1735 uint32_t const fMask = RT_BIT_32(cShift) - 1;
1736 uint32_t cLeft = RT_MIN(RT_MIN(idx, RT_BIT_32(cShift)), cLimit);
1737 PCIEMTLBTRACEENTRY paEntries = pVCpu->iem.s.paTlbTraceEntries;
1738 if (cLeft && paEntries)
1739 {
1740 /*
1741 * Display the entries.
1742 */
1743 pHlp->pfnPrintf(pHlp, "TLB Trace for CPU %u:\n", pVCpu->idCpu);
1744 while (cLeft-- > 0)
1745 {
1746 PCIEMTLBTRACEENTRY const pCur = &paEntries[--idx & fMask];
1747 const char *pszSymbol = "";
1748 union
1749 {
1750 RTDBGSYMBOL Symbol;
1751 char ach[sizeof(RTDBGSYMBOL) + 32];
1752 } uBuf;
1753 if (fResolveRip)
1754 {
1755 RTGCINTPTR offDisp = 0;
1756 DBGFADDRESS Addr;
1757 rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, pCur->rip),
1758 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL
1759 | RTDBGSYMADDR_FLAGS_SKIP_ABS
1760 | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1761 &offDisp, &uBuf.Symbol, NULL);
1762 if (RT_SUCCESS(rc))
1763 {
1764 /* Add displacement. */
1765 if (offDisp)
1766 {
1767 size_t const cchName = strlen(uBuf.Symbol.szName);
1768 char * const pszEndName = &uBuf.Symbol.szName[cchName];
1769 size_t const cbLeft = sizeof(uBuf) - sizeof(uBuf.Symbol) + sizeof(uBuf.Symbol.szName) - cchName;
1770 if (offDisp > 0)
1771 RTStrPrintf(pszEndName, cbLeft, "+%#1RGv", offDisp);
1772 else
1773 RTStrPrintf(pszEndName, cbLeft, "-%#1RGv", -offDisp);
1774 }
1775
1776 /* Put a space before it. */
1777 AssertCompile(RTASSERT_OFFSET_OF(RTDBGSYMBOL, szName) > 0);
1778 char *pszName = uBuf.Symbol.szName;
1779 *--pszName = ' ';
1780 pszSymbol = pszName;
1781 }
1782 }
1783 static const char *s_apszTlbType[2] = { "code", "data" };
1784 static const char *s_apszScanType[4] = { "skipped", "global", "non-global", "both" };
1785 switch (pCur->enmType)
1786 {
1787 case kIemTlbTraceType_InvlPg:
1788 pHlp->pfnPrintf(pHlp, "%u: %016RX64 invlpg %RGv slot=" IEMTLB_SLOT_FMT "%s\n", idx, pCur->rip,
1789 pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pVCpu, pCur->u64Param), pszSymbol);
1790 break;
1791 case kIemTlbTraceType_EvictSlot:
1792 pHlp->pfnPrintf(pHlp, "%u: %016RX64 evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1793 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1794 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1795 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1796 pCur->u64Param2, pszSymbol);
1797 break;
1798 case kIemTlbTraceType_LargeEvictSlot:
1799 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1800 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1801 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1802 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1803 pCur->u64Param2, pszSymbol);
1804 break;
1805 case kIemTlbTraceType_LargeScan:
1806 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large scan %s %s%s\n", idx, pCur->rip, s_apszTlbType[pCur->bParam & 1],
1807 s_apszScanType[pCur->u32Param & 3], pszSymbol);
1808 break;
1809
1810 case kIemTlbTraceType_Flush:
1811 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64%s\n", idx, pCur->rip,
1812 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pszSymbol);
1813 break;
1814 case kIemTlbTraceType_FlushGlobal:
1815 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64 grev=%#RX64%s\n", idx, pCur->rip,
1816 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pCur->u64Param2, pszSymbol);
1817 if (fStopAtGlobalFlush)
1818 return;
1819 break;
1820 case kIemTlbTraceType_Load:
1821 case kIemTlbTraceType_LoadGlobal:
1822 pHlp->pfnPrintf(pHlp, "%u: %016RX64 %cload %s %RGv slot=" IEMTLB_SLOT_FMT " gcphys=%RGp fTlb=%#RX32%s\n",
1823 idx, pCur->rip,
1824 pCur->enmType == kIemTlbTraceType_LoadGlobal ? 'g' : 'l', s_apszTlbType[pCur->bParam & 1],
1825 pCur->u64Param,
1826 (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pVCpu, pCur->u64Param)
1827 | (pCur->enmType == kIemTlbTraceType_LoadGlobal),
1828 (RTGCPTR)pCur->u64Param2, pCur->u32Param, pszSymbol);
1829 break;
1830
1831 case kIemTlbTraceType_Load_Cr0:
1832 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr0 %08RX64 (was %08RX64)%s\n",
1833 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1834 break;
1835 case kIemTlbTraceType_Load_Cr3:
1836 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr3 %016RX64 (was %016RX64)%s\n",
1837 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1838 break;
1839 case kIemTlbTraceType_Load_Cr4:
1840 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr4 %08RX64 (was %08RX64)%s\n",
1841 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1842 break;
1843 case kIemTlbTraceType_Load_Efer:
1844 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load efer %016RX64 (was %016RX64)%s\n",
1845 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1846 break;
1847
1848 case kIemTlbTraceType_Irq:
1849 pHlp->pfnPrintf(pHlp, "%u: %016RX64 irq %#04x flags=%#x eflboth=%#RX64%s\n",
1850 idx, pCur->rip, pCur->bParam, pCur->u32Param,
1851 pCur->u64Param & ((RT_BIT_64(CPUMX86EFLAGS_HW_BITS) - 1) | CPUMX86EFLAGS_INT_MASK_64),
1852 pszSymbol);
1853 break;
1854 case kIemTlbTraceType_Xcpt:
1855 if (pCur->u32Param & IEM_XCPT_FLAGS_CR2)
1856 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x cr2=%RX64%s\n",
1857 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pCur->u64Param2, pszSymbol);
1858 else if (pCur->u32Param & IEM_XCPT_FLAGS_ERR)
1859 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x%s\n",
1860 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pszSymbol);
1861 else
1862 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x%s\n",
1863 idx, pCur->rip, pCur->bParam, pCur->u32Param, pszSymbol);
1864 break;
1865 case kIemTlbTraceType_IRet:
1866 pHlp->pfnPrintf(pHlp, "%u: %016RX64 iret cs:rip=%04x:%016RX64 efl=%08RX32%s\n",
1867 idx, pCur->rip, pCur->u32Param, pCur->u64Param, (uint32_t)pCur->u64Param2, pszSymbol);
1868 break;
1869
1870 case kIemTlbTraceType_Tb_Compile:
1871 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb comp GCPhysPc=%012RX64%s\n",
1872 idx, pCur->rip, pCur->u64Param, pszSymbol);
1873 break;
1874 case kIemTlbTraceType_Tb_Exec_Threaded:
1875 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb thrd GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1876 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1877 break;
1878 case kIemTlbTraceType_Tb_Exec_Native:
1879 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb n8ve GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1880 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1881 break;
1882
1883 case kIemTlbTraceType_User0:
1884 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user0 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1885 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1886 break;
1887 case kIemTlbTraceType_User1:
1888 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user1 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1889 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1890 break;
1891 case kIemTlbTraceType_User2:
1892 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user2 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1893 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1894 break;
1895 case kIemTlbTraceType_User3:
1896 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user3 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1897 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1898 break;
1899
1900 case kIemTlbTraceType_Invalid:
1901                    pHlp->pfnPrintf(pHlp, "%u: Invalid!\n", idx);
1902 break;
1903 }
1904 }
1905 }
1906 else
1907 pHlp->pfnPrintf(pHlp, "No trace entries to display\n");
1908}
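
/*
 * Example invocations (the entry count is arbitrary):
 *      info tlbtrace 500    - the last 500 TLB trace entries for the calling EMT
 *      info tlbtrace -g -r  - stop at the first global flush, resolving RIP symbols
 */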
1909#endif /* IEM_WITH_TLB_TRACE */
1910
1911#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1912
1913/**
1914 * Get get compile time flat PC for the TB.
1915 * Gets the compile-time flat PC for the TB.
1916DECL_FORCE_INLINE(RTGCPTR) iemR3GetTbFlatPc(PCIEMTB pTb)
1917{
1918#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
1919 if (pTb->fFlags & IEMTB_F_TYPE_NATIVE)
1920 {
1921 PCIEMTBDBG const pDbgInfo = pTb->pDbgInfo;
1922 return pDbgInfo ? pDbgInfo->FlatPc : RTGCPTR_MAX;
1923 }
1924#endif
1925 return pTb->FlatPc;
1926}
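
/*
 * Usage sketch (hypothetical caller): with IEMNATIVE_WITH_TB_DEBUG_INFO the
 * compile-time PC of a native TB lives in its debug info, so callers go through
 * the helper above rather than reading pTb->FlatPc directly:
 *
 *      RTGCPTR const GCPtrPc = iemR3GetTbFlatPc(pTb);
 *      if (GCPtrPc != RTGCPTR_MAX)
 *          pHlp->pfnPrintf(pHlp, "TB compiled at %RGv\n", GCPtrPc);
 */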
1927
1928
1929/**
1930 * @callback_method_impl{FNDBGFINFOARGVINT, tb}
1931 */
1932static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1933{
1934 /*
1935 * Parse arguments.
1936 */
1937 static RTGETOPTDEF const s_aOptions[] =
1938 {
1939 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1940 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1941 { "--addr", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1942 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1943 { "--phys", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1944 { "--physical", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1945 { "--phys-addr", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1946 { "--phys-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1947 { "--physical-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1948 { "--flags", 'f', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1949 { "--tb", 't', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1950 { "--tb-id", 't', RTGETOPT_REQ_UINT32 },
1951 };
1952
1953 RTGETOPTSTATE State;
1954 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1955 AssertRCReturnVoid(rc);
1956
1957 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
1958 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
1959 RTGCPHYS GCPhysPc = NIL_RTGCPHYS;
1960 RTGCPTR GCVirt = NIL_RTGCPTR;
1961 uint32_t fFlags = UINT32_MAX;
1962 uint32_t idTb = UINT32_MAX;
1963
1964 RTGETOPTUNION ValueUnion;
1965 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1966 {
1967 switch (rc)
1968 {
1969 case 'c':
1970 if (ValueUnion.u32 >= pVM->cCpus)
1971 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1972 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1973 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1974 break;
1975
1976 case 'a':
1977 GCVirt = ValueUnion.u64;
1978 GCPhysPc = NIL_RTGCPHYS;
1979 idTb = UINT32_MAX;
1980 break;
1981
1982 case 'p':
1983 GCVirt = NIL_RTGCPTR;
1984 GCPhysPc = ValueUnion.u64;
1985 idTb = UINT32_MAX;
1986 break;
1987
1988 case 'f':
1989 fFlags = ValueUnion.u32;
1990 break;
1991
1992 case 't':
1993 GCVirt = NIL_RTGCPTR;
1994 GCPhysPc = NIL_RTGCPHYS;
1995 idTb = ValueUnion.u32;
1996 break;
1997
1998 case VINF_GETOPT_NOT_OPTION:
1999 {
2000 if ( (ValueUnion.psz[0] == 'T' || ValueUnion.psz[0] == 't')
2001 && (ValueUnion.psz[1] == 'B' || ValueUnion.psz[1] == 'b')
2002 && ValueUnion.psz[2] == '#')
2003 {
2004 rc = RTStrToUInt32Full(&ValueUnion.psz[3], 0, &idTb);
2005 if (RT_SUCCESS(rc))
2006 {
2007 GCVirt = NIL_RTGCPTR;
2008 GCPhysPc = NIL_RTGCPHYS;
2009 break;
2010 }
2011 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a TB ID: %Rrc\n", ValueUnion.psz, rc);
2012 }
2013 else
2014 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
2015 return;
2016 }
2017
2018 case 'h':
2019 pHlp->pfnPrintf(pHlp,
2020 "Usage: info tb [options]\n"
2021 "\n"
2022 "Options:\n"
2023 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
2024 " Selects the CPU whose TBs we're looking at. Default: Caller / 0\n"
2025 " -a<virt>, --address=<virt>\n"
2026 " Shows the TB for the specified guest virtual address.\n"
2027 " -p<phys>, --phys=<phys>, --phys-addr=<phys>\n"
2028 " Shows the TB for the specified guest physical address.\n"
2029 " -t<id>, --tb=<id>, --tb-id=<id>, TB#<id>\n"
2030 " Shows the TB specified by the identifier/number (from tbtop).\n"
2031 " -f<flags>, --flags=<flags>\n"
2032 " The TB flags value (hex) to use when looking up the TB.\n"
2033 "\n"
2034 "The default is to use CS:RIP and derive flags from the CPU mode.\n");
2035 return;
2036
2037 default:
2038 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
2039 return;
2040 }
2041 }
2042
2043 /* Currently, only do work on the same EMT. */
2044 if (pVCpu != pVCpuThis)
2045 {
2046 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
2047 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
2048 return;
2049 }
2050
2051 /*
2052 * Defaults.
2053 */
2054 if (GCPhysPc == NIL_RTGCPHYS && idTb == UINT32_MAX)
2055 {
2056 if (GCVirt == NIL_RTGCPTR)
2057 GCVirt = CPUMGetGuestFlatPC(pVCpu);
2058 rc = PGMPhysGCPtr2GCPhys(pVCpu, GCVirt, &GCPhysPc);
2059 if (RT_FAILURE(rc))
2060 {
2061 pHlp->pfnPrintf(pHlp, "Failed to convert %%%RGv to a guest physical address: %Rrc\n", GCVirt, rc);
2062 return;
2063 }
2064 }
2065 if (fFlags == UINT32_MAX && idTb == UINT32_MAX)
2066 {
2067 /* Note! This is duplicating code in IEMAllThrdRecompiler. */
2068 fFlags = iemCalcExecFlags(pVCpu);
2069 if (pVM->cCpus == 1)
2070 fFlags |= IEM_F_X86_DISREGARD_LOCK;
2071 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
2072 fFlags |= IEMTB_F_X86_INHIBIT_SHADOW;
2073 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
2074 fFlags |= IEMTB_F_X86_INHIBIT_NMI;
2075 if ((IEM_F_MODE_X86_CPUMODE_MASK & fFlags) != IEMMODE_64BIT)
2076 {
2077 int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
2078 if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
2079 fFlags |= IEMTB_F_X86_CS_LIM_CHECKS;
2080 }
2081 }
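    /* Note: only the bits covered by IEMTB_F_KEY_MASK take part in the cache
       lookup below, so the flags derived here have to match the key the
       recompiler stored with the TB for the hash chain walk to find it. */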
2082
2083 PCIEMTB pTb;
2084 if (idTb == UINT32_MAX)
2085 {
2086 /*
2087 * Do the lookup...
2088 *
2089 * Note! This is also duplicating code in IEMAllThrdRecompiler. We don't
2090 * have much choice since we don't want to increase use counters and
2091 * trigger native recompilation.
2092 */
2093 fFlags &= IEMTB_F_KEY_MASK;
2094 IEMTBCACHE const * const pTbCache = pVCpu->iem.s.pTbCacheR3;
2095 uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
2096 pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
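        /* IEMTBCACHE_HASH folds the masked flags and the physical PC into a
           bucket index; the collision chain is then walked below, comparing
           GCPhysPc and the IEMTB_F_KEY_MASK portion of the flags. */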
2097 while (pTb)
2098 {
2099 if (pTb->GCPhysPc == GCPhysPc)
2100 {
2101 if ((pTb->fFlags & IEMTB_F_KEY_MASK) == fFlags)
2102 {
2103 /// @todo if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
2104 break;
2105 }
2106 }
2107 pTb = pTb->pNext;
2108 }
2109 if (!pTb)
2110 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x - no TB found on #%u\n", GCPhysPc, fFlags, pVCpu->idCpu);
2111 }
2112 else
2113 {
2114 /*
2115 * Use the TB ID for indexing.
2116 */
2117 pTb = NULL;
2118 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2119 if (pTbAllocator)
2120 {
2121 size_t const idxTbChunk = idTb / pTbAllocator->cTbsPerChunk;
2122 size_t const idxTbInChunk = idTb % pTbAllocator->cTbsPerChunk;
2123 if (idxTbChunk < pTbAllocator->cAllocatedChunks)
2124 pTb = &pTbAllocator->aChunks[idxTbChunk].paTbs[idxTbInChunk];
2125 else
2126 pHlp->pfnPrintf(pHlp, "Invalid TB ID: %u (%#x)\n", idTb, idTb);
2127 }
2128 }
2129
2130 if (pTb)
2131 {
2132 /*
2133 * Disassemble according to type.
2134 */
2135 size_t const idxTbChunk = pTb->idxAllocChunk;
2136 size_t const idxTbNo = (pTb - &pVCpu->iem.s.pTbAllocatorR3->aChunks[idxTbChunk].paTbs[0])
2137 + idxTbChunk * pVCpu->iem.s.pTbAllocatorR3->cTbsPerChunk;
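        /* idxTbNo mirrors the --tb-id decomposition above: chunk number times
           TBs-per-chunk plus the TB's index within its chunk, i.e. the TB#
           value that the tbtop info handler prints. */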
2138 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2139 {
2140# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2141 case IEMTB_F_TYPE_NATIVE:
2142 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - native\n",
2143 GCPhysPc, iemR3GetTbFlatPc(pTb), fFlags, pVCpu->idCpu, idxTbNo, pTb);
2144 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2145 break;
2146# endif
2147
2148 case IEMTB_F_TYPE_THREADED:
2149 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - threaded\n",
2150 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb);
2151 iemThreadedDisassembleTb(pTb, pHlp);
2152 break;
2153
2154 default:
2155 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - ??? %#x\n",
2156 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb, pTb->fFlags);
2157 break;
2158 }
2159 }
2160}
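For context, this info handler is reached through the VM debugger console's generic info command; a session might look like the following sketch (the VBoxDbg> prompt, the addresses and the flags value are illustrative placeholders, not taken from this file):

    VBoxDbg> info tb
    VBoxDbg> info tb -c1 --address=0xffffffff81000000
    VBoxDbg> info tb --phys=0xa2345000 --flags=0x48
    VBoxDbg> info tb TB#0x42

The first form uses the defaults derived above (CS:RIP of the calling/first CPU and flags computed from the current CPU mode); the others override the virtual address, the physical address plus flags, or look the TB up directly by its allocator ID.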
2161
2162
2163/**
2164 * @callback_method_impl{FNDBGFINFOARGVINT, tbtop}
2165 */
2166static DECLCALLBACK(void) iemR3InfoTbTop(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
2167{
2168 /*
2169 * Parse arguments.
2170 */
2171 static RTGETOPTDEF const s_aOptions[] =
2172 {
2173 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
2174 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
2175 { "--dis", 'd', RTGETOPT_REQ_NOTHING },
2176 { "--disas", 'd', RTGETOPT_REQ_NOTHING },
2177 { "--disasm", 'd', RTGETOPT_REQ_NOTHING },
2178 { "--disassemble", 'd', RTGETOPT_REQ_NOTHING },
2179 { "--no-dis", 'D', RTGETOPT_REQ_NOTHING },
2180 { "--no-disas", 'D', RTGETOPT_REQ_NOTHING },
2181 { "--no-disasm", 'D', RTGETOPT_REQ_NOTHING },
2182 { "--no-disassemble", 'D', RTGETOPT_REQ_NOTHING },
2183 { "--most-freq", 'f', RTGETOPT_REQ_NOTHING },
2184 { "--most-frequent", 'f', RTGETOPT_REQ_NOTHING },
2185 { "--most-frequently", 'f', RTGETOPT_REQ_NOTHING },
2186 { "--most-frequently-used", 'f', RTGETOPT_REQ_NOTHING },
2187 { "--most-recent", 'r', RTGETOPT_REQ_NOTHING },
2188 { "--most-recently", 'r', RTGETOPT_REQ_NOTHING },
2189 { "--most-recently-used", 'r', RTGETOPT_REQ_NOTHING },
2190 { "--count", 'n', RTGETOPT_REQ_UINT32 },
2191 };
2192
2193 RTGETOPTSTATE State;
2194 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
2195 AssertRCReturnVoid(rc);
2196
2197 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
2198 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
2199 enum { kTbTop_MostFrequentlyUsed, kTbTop_MostRecentlyUsed }
2200 enmTop = kTbTop_MostFrequentlyUsed;
2201 bool fDisassemble = false;
2202 uint32_t const cTopDefault = 64;
2203 uint32_t const cTopMin = 1;
2204 uint32_t const cTopMax = 1024;
2205 uint32_t cTop = cTopDefault;
2206
2207 RTGETOPTUNION ValueUnion;
2208 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
2209 {
2210 switch (rc)
2211 {
2212 case 'c':
2213 if (ValueUnion.u32 >= pVM->cCpus)
2214 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
2215 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
2216 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
2217 break;
2218
2219 case 'd':
2220 fDisassemble = true;
2221 break;
2222
2223 case 'D':
2224 fDisassemble = false;
2225 break;
2226
2227 case 'f':
2228 enmTop = kTbTop_MostFrequentlyUsed;
2229 break;
2230
2231 case 'r':
2232 enmTop = kTbTop_MostRecentlyUsed;
2233 break;
2234
2235 case VINF_GETOPT_NOT_OPTION:
2236 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cTop);
2237 if (RT_FAILURE(rc))
2238 {
2239 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
2240 return;
2241 }
2242 ValueUnion.u32 = cTop;
2243 RT_FALL_THROUGH();
2244 case 'n':
2245 if (!ValueUnion.u32)
2246 cTop = cTopDefault;
2247 else
2248 {
2249 cTop = RT_MAX(RT_MIN(ValueUnion.u32, cTopMax), cTopMin);
2250 if (cTop != ValueUnion.u32)
2251 pHlp->pfnPrintf(pHlp, "warning: adjusted %u to %u (valid range: [%u..%u], 0 for default (%d))\n",
2252 ValueUnion.u32, cTop, cTopMin, cTopMax, cTopDefault);
2253 }
2254 break;
2255
2256 case 'h':
2257 pHlp->pfnPrintf(pHlp,
2258 "Usage: info tbtop [options]\n"
2259 "\n"
2260 "Options:\n"
2261 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
2262 " Selects the CPU whose TBs we're looking at. Default: Caller / 0\n"
2263 " -d, --dis[as[m]], --disassemble\n"
2264 " Show full TB disassembly.\n"
2265 " -D, --no-dis[as[m]], --no-disassemble\n"
2266 " Do not show TB disassembly. The default.\n"
2267 " -f, --most-freq[ent[ly[-used]]]\n"
2268 " Shows the most frequently used TBs (IEMTB::cUsed). The default.\n"
2269 " -r, --most-recent[ly[-used]]\n"
2270 " Shows the most recently used TBs (IEMTB::msLastUsed).\n"
2271 " -n<num>, --count=<num>\n"
2272 " The number of TBs to display. Default: %u\n"
2273 " This is also what non-option arguments will be taken as.\n"
2274 , cTopDefault);
2275 return;
2276
2277 default:
2278 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
2279 return;
2280 }
2281 }
2282
2283 /* Currently, only do work on the same EMT. */
2284 if (pVCpu != pVCpuThis)
2285 {
2286 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
2287 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
2288 return;
2289 }
2290
2291 /*
2292 * Collect the data by scanning the TB allocation map.
2293 */
2294 struct IEMTBTOPENTRY
2295 {
2296 /** Pointer to the translation block. */
2297 PCIEMTB pTb;
2298 /** The sorting key. */
2299 uint64_t uSortKey;
2300 } aTop[cTopMax] = { { NULL, 0 }, };
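    /* Each entry is a pointer plus a 64-bit key (16 bytes on 64-bit hosts), so
       sizing the table by cTopMax (1024) puts roughly 16 KiB on the stack. */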
2301 uint32_t cValid = 0;
2302 PIEMTBALLOCATOR pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2303 if (pTbAllocator)
2304 {
2305 uint32_t const cTbsPerChunk = pTbAllocator->cTbsPerChunk;
2306 for (uint32_t iChunk = 0; iChunk < pTbAllocator->cAllocatedChunks; iChunk++)
2307 {
2308 for (uint32_t iTb = 0; iTb < cTbsPerChunk; iTb++)
2309 {
2310 PCIEMTB const pTb = &pTbAllocator->aChunks[iChunk].paTbs[iTb];
2311 AssertContinue(pTb);
2312 if (pTb->fFlags & IEMTB_F_TYPE_MASK)
2313 {
2314 /* Extract and compose the sort key. */
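                    /* RT_MAKE_U64(Lo, Hi): the primary criterion ends up in the
                       high dword and the secondary one in the low dword, so a
                       plain 64-bit compare sorts by the chosen field first and
                       breaks ties with the other. */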
2315 uint64_t const uSortKey = enmTop == kTbTop_MostFrequentlyUsed
2316 ? RT_MAKE_U64(pTb->msLastUsed, pTb->cUsed)
2317 : RT_MAKE_U64(pTb->cUsed, pTb->msLastUsed);
2318
2319 /*
2320 * Discard the key if it's smaller than the smallest in the table when it is full.
2321 */
2322 if ( cValid >= cTop
2323 && uSortKey <= aTop[cTop - 1].uSortKey)
2324 { /* discard it */ }
2325 else
2326 {
2327 /*
2328 * Do binary search to find the insert location
2329 */
2330 uint32_t idx;
2331 if (cValid > 0)
2332 {
2333 uint32_t idxEnd = cValid;
2334 uint32_t idxStart = 0;
2335 idx = cValid / 2;
2336 for (;;)
2337 {
2338 if (uSortKey > aTop[idx].uSortKey)
2339 {
2340 if (idx > idxStart)
2341 idxEnd = idx;
2342 else
2343 break;
2344 }
2345 else if (uSortKey < aTop[idx].uSortKey)
2346 {
2347 idx += 1;
2348 if (idx < idxEnd)
2349 idxStart = idx;
2350 else
2351 break;
2352 }
2353 else
2354 {
2355 do
2356 idx++;
2357 while (idx < cValid && uSortKey == aTop[idx].uSortKey);
2358 break;
2359 }
2360 idx = idxStart + (idxEnd - idxStart) / 2;
2361 }
2362 AssertContinue(idx < RT_ELEMENTS(aTop));
2363
2364 /*
2365 * Shift entries as needed.
2366 */
2367 if (cValid >= cTop)
2368 {
2369 if (idx != cTop - 1U)
2370 memmove(&aTop[idx + 1], &aTop[idx], (cTop - idx - 1) * sizeof(aTop[0]));
2371 }
2372 else
2373 {
2374 if (idx != cValid)
2375 memmove(&aTop[idx + 1], &aTop[idx], (cValid - idx) * sizeof(aTop[0]));
2376 cValid++;
2377 }
2378 }
2379 else
2380 {
2381 /* Special case: The first insertion. */
2382 cValid = 1;
2383 idx = 0;
2384 }
2385
2386 /*
2387 * Fill in the new entry.
2388 */
2389 aTop[idx].uSortKey = uSortKey;
2390 aTop[idx].pTb = pTb;
2391 }
2392 }
2393 }
2394 }
2395 }
2396
2397 /*
2398 * Display the result.
2399 */
2400 if (cTop > cValid)
2401 cTop = cValid;
2402 pHlp->pfnPrintf(pHlp, "Displaying the top %u TBs for CPU #%u ordered by %s:\n",
2403 cTop, pVCpu->idCpu, enmTop == kTbTop_MostFrequentlyUsed ? "cUsed" : "msLastUsed");
2404 if (fDisassemble)
2405 pHlp->pfnPrintf(pHlp, "================================================================================\n");
2406
2407 for (uint32_t idx = 0; idx < cTop; idx++)
2408 {
2409 if (fDisassemble && idx)
2410 pHlp->pfnPrintf(pHlp, "\n------------------------------- %u -------------------------------\n", idx);
2411
2412 PCIEMTB const pTb = aTop[idx].pTb;
2413 size_t const idxTbChunk = pTb->idxAllocChunk;
2414 Assert(idxTbChunk < pTbAllocator->cAllocatedChunks);
2415 size_t const idxTbNo = (pTb - &pTbAllocator->aChunks[idxTbChunk].paTbs[0])
2416 + idxTbChunk * pTbAllocator->cTbsPerChunk;
2417 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2418 {
2419# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2420 case IEMTB_F_TYPE_NATIVE:
2421 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - native\n",
2422 idxTbNo, pTb->GCPhysPc, iemR3GetTbFlatPc(pTb), pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2423 if (fDisassemble)
2424 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2425 break;
2426# endif
2427
2428 case IEMTB_F_TYPE_THREADED:
2429 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - threaded\n",
2430 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2431 if (fDisassemble)
2432 iemThreadedDisassembleTb(pTb, pHlp);
2433 break;
2434
2435 default:
2436 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - ???\n",
2437 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2438 break;
2439 }
2440 }
2441}
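The selection loop above is a bounded insertion sort that keeps only the cTop largest sort keys. A minimal standalone sketch of the same technique, using hypothetical names, a plain linear scan instead of the binary search, and only the C standard library:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TOP_MAX 4

    typedef struct TOPENTRY
    {
        uint64_t uKey;    /* sort key; larger ranks higher */
        uint32_t idItem;  /* payload (stand-in for the TB pointer) */
    } TOPENTRY;

    /* Inserts uKey/idItem if it ranks among the cMax largest keys seen so far.
       The table is kept in descending key order; returns the new entry count. */
    static uint32_t topInsert(TOPENTRY *paTop, uint32_t cValid, uint32_t cMax, uint64_t uKey, uint32_t idItem)
    {
        if (cValid >= cMax && uKey <= paTop[cMax - 1].uKey)
            return cValid;                          /* cannot make it into a full table */

        uint32_t idx = 0;                           /* linear search; the code above bisects */
        while (idx < cValid && paTop[idx].uKey >= uKey)
            idx++;

        uint32_t const cKeep = cValid < cMax ? cValid : cMax - 1; /* last entry drops when full */
        if (cKeep > idx)
            memmove(&paTop[idx + 1], &paTop[idx], (cKeep - idx) * sizeof(paTop[0]));

        paTop[idx].uKey   = uKey;
        paTop[idx].idItem = idItem;
        return cValid < cMax ? cValid + 1 : cValid;
    }

    int main(void)
    {
        static uint64_t const s_auKeys[] = { 7, 42, 3, 99, 15, 8, 57 };
        TOPENTRY aTop[TOP_MAX] = { { 0, 0 } };
        uint32_t cValid = 0;

        for (uint32_t i = 0; i < sizeof(s_auKeys) / sizeof(s_auKeys[0]); i++)
            cValid = topInsert(aTop, cValid, TOP_MAX, s_auKeys[i], i);

        for (uint32_t i = 0; i < cValid; i++)
            printf("#%u: key=%llu item=%u\n", i, (unsigned long long)aTop[i].uKey, aTop[i].idItem);
        return 0;   /* prints keys 99, 57, 42, 15 in that order */
    }

From the debugger console the handler itself would be invoked per the usage text above, e.g. info tbtop 32 -d for the top 32 TBs with disassembly.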
2442
2443#endif /* VBOX_WITH_IEM_RECOMPILER && !VBOX_VMM_TARGET_ARMV8 */
2444
2445
2446#ifdef VBOX_WITH_DEBUGGER
2447
2448/** @callback_method_impl{FNDBGCCMD,
2449 * Implements the '.iemflushtlb' command. }
2450 */
2451static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2452{
2453 VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
2454 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
2455 if (pVCpu)
2456 {
2457 VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAllGlobal, 1, pVCpu);
2458 return VINF_SUCCESS;
2459 }
2460 RT_NOREF(paArgs, cArgs);
2461 return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
2462}
2463
2464
2465/**
2466 * Called by IEMR3Init to register debugger commands.
2467 */
2468static void iemR3RegisterDebuggerCommands(void)
2469{
2470 /*
2471 * Register debugger commands.
2472 */
2473 static DBGCCMD const s_aCmds[] =
2474 {
2475 {
2476 /* .pszCmd = */ "iemflushtlb",
2477 /* .cArgsMin = */ 0,
2478 /* .cArgsMax = */ 0,
2479 /* .paArgDescs = */ NULL,
2480 /* .cArgDescs = */ 0,
2481 /* .fFlags = */ 0,
2482 /* .pfnHandler = */ iemR3DbgFlushTlbs,
2483 /* .pszSyntax = */ "",
2484 /* .pszDescription = */ "Flushes the code and data TLBs"
2485 },
2486 };
2487
2488 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
2489 AssertLogRelRC(rc);
2490}
2491
2492#endif /* VBOX_WITH_DEBUGGER */
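Once registered, iemflushtlb is invoked from the debugger console like other externally registered commands (typically with a leading dot); a minimal session sketch, with the prompt shown for illustration only:

    VBoxDbg> .iemflushtlb

As iemR3DbgFlushTlbs above shows, this queues IEMTlbInvalidateAllGlobal on the selected CPU's EMT via VMR3ReqPriorityCallVoidWaitU and waits for it to complete.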
2493