VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp@106211

Last change on this file since 106211 was 106192, checked in by vboxsync, 4 months ago

VMM/IEM: Added some basic stats & debug info for postponed EFLAGS calcs. Moved debug info structures from IEMInternal.h and into IEMN8veRecompiler.h. bugref:10720

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 145.1 KB
1/* $Id: IEMR3.cpp 106192 2024-10-01 12:57:32Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/dbgf.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/ssm.h>
39#if defined(VBOX_VMM_TARGET_ARMV8)
40# include "IEMInternal-armv8.h"
41#else
42# include "IEMInternal.h"
43#endif
44#include <VBox/vmm/vm.h>
45#include <VBox/vmm/vmapi.h>
46#include <VBox/err.h>
47#ifdef VBOX_WITH_DEBUGGER
48# include <VBox/dbg.h>
49#endif
50
51#include <iprt/assert.h>
52#include <iprt/getopt.h>
53#ifdef IEM_WITH_TLB_TRACE
54# include <iprt/mem.h>
55#endif
56#include <iprt/string.h>
57
58#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
59# include "IEMN8veRecompiler.h"
60# include "IEMThreadedFunctions.h"
61# include "IEMInline.h"
62#endif
63
64
65/*********************************************************************************************************************************
66* Internal Functions *
67*********************************************************************************************************************************/
68static FNDBGFINFOARGVINT iemR3InfoITlb;
69static FNDBGFINFOARGVINT iemR3InfoDTlb;
70#ifdef IEM_WITH_TLB_TRACE
71static FNDBGFINFOARGVINT iemR3InfoTlbTrace;
72#endif
73#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
74static FNDBGFINFOARGVINT iemR3InfoTb;
75static FNDBGFINFOARGVINT iemR3InfoTbTop;
76#endif
77#ifdef VBOX_WITH_DEBUGGER
78static void iemR3RegisterDebuggerCommands(void);
79#endif
80
81
82#if !defined(VBOX_VMM_TARGET_ARMV8)
83static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
84{
85 switch (enmTargetCpu)
86 {
87#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
88 CASE_RET_STR(IEMTARGETCPU_8086);
89 CASE_RET_STR(IEMTARGETCPU_V20);
90 CASE_RET_STR(IEMTARGETCPU_186);
91 CASE_RET_STR(IEMTARGETCPU_286);
92 CASE_RET_STR(IEMTARGETCPU_386);
93 CASE_RET_STR(IEMTARGETCPU_486);
94 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
95 CASE_RET_STR(IEMTARGETCPU_PPRO);
96 CASE_RET_STR(IEMTARGETCPU_CURRENT);
97#undef CASE_RET_STR
98 default: return "Unknown";
99 }
100}
101#endif
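/* Editor's note, not part of the original revision: CASE_RET_STR above stringizes
   the enum constant and skips the leading "IEMTARGETCPU_" characters via pointer
   arithmetic on the string literal, so e.g. iemGetTargetCpuName(IEMTARGETCPU_PENTIUM)
   returns "PENTIUM" and iemGetTargetCpuName(IEMTARGETCPU_486) returns "486". */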
102
103
104/**
105 * Initializes the interpreted execution manager.
106 *
107 * This must be called after CPUM as we're querying information from CPUM about
108 * the guest and host CPUs.
109 *
110 * @returns VBox status code.
111 * @param pVM The cross context VM structure.
112 */
113VMMR3DECL(int) IEMR3Init(PVM pVM)
114{
115 /*
116 * Read configuration.
117 */
118#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
119 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
120 int rc;
121#endif
122
123#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
124 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
125 * Controls whether the custom VBox specific CPUID host call interface is
126 * enabled or not. */
127# ifdef DEBUG_bird
128 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
129# else
130 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
131# endif
132 AssertLogRelRCReturn(rc, rc);
133#endif
134
135#ifdef VBOX_WITH_IEM_RECOMPILER
136 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
137 * Max number of TBs per EMT. */
138 uint32_t cMaxTbs = 0;
139 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
140 AssertLogRelRCReturn(rc, rc);
141 if (cMaxTbs < _16K || cMaxTbs > _8M)
142 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
143 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
144
145 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32768}
146 * Initial (minimum) number of TBs per EMT in ring-3. */
147 uint32_t cInitialTbs = 0;
148 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
149 AssertLogRelRCReturn(rc, rc);
150 if (cInitialTbs < _16K || cInitialTbs > _8M)
151 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
152 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
153
154 /* Check that the two values make sense together. Expect user/api to do
155 the right thing or get lost. */
156 if (cInitialTbs > cMaxTbs)
157 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
158 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
159 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
160
161 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
162 * Max executable memory for recompiled code per EMT. */
163 uint64_t cbMaxExec = 0;
164 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
165 AssertLogRelRCReturn(rc, rc);
166 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
167 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
168 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
169 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
170
171 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
172 * The executable memory allocator chunk size. */
173 uint32_t cbChunkExec = 0;
174 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
175 AssertLogRelRCReturn(rc, rc);
176 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
177 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
178 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
179 cbChunkExec, cbChunkExec, _1M, _256M);
180
181 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
182 * The initial executable memory allocator size (per EMT). The value is
183 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
184 uint64_t cbInitialExec = 0;
185 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
186 AssertLogRelRCReturn(rc, rc);
187 if (cbInitialExec > cbMaxExec)
188 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
189 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
190 cbInitialExec, cbInitialExec, cbMaxExec);
191
192 /** @cfgm{/IEM/NativeRecompileAtUsedCount, uint32_t, 16}
193 * The translation block use count value to do native recompilation at.
194 * Set to zero to disable native recompilation. */
195 uint32_t uTbNativeRecompileAtUsedCount = 16;
196 rc = CFGMR3QueryU32Def(pIem, "NativeRecompileAtUsedCount", &uTbNativeRecompileAtUsedCount, 16);
197 AssertLogRelRCReturn(rc, rc);
198
199#endif /* VBOX_WITH_IEM_RECOMPILER */
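/* Editor's sketch (assumption, not part of this file): the /IEM/* keys queried
   above live under the VM's CFGM root, so on a typical setup they can be
   injected from the host side via the extra-data mechanism that maps the
   "VBoxInternal/" prefix onto the CFGM tree, e.g.:

       VBoxManage setextradata "MyVM" "VBoxInternal/IEM/MaxTbCount"  1048576
       VBoxManage setextradata "MyVM" "VBoxInternal/IEM/MaxExecMem"  1073741824

   "MyVM" is a placeholder VM name; values must stay inside the ranges validated
   above (MaxTbCount in [16384, 8388608], MaxExecMem in [1 MiB, 16 GiB]). */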
200
201 /*
202 * Initialize per-CPU data and register statistics.
203 */
204#if 1
205 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
206 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
207#else
208 uint64_t const uInitialTlbRevision = UINT64_C(0) + (IEMTLB_REVISION_INCR * 4U);
209 uint64_t const uInitialTlbPhysRev = UINT64_C(0) + (IEMTLB_PHYS_REV_INCR * 4U);
210#endif
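/* Editor's note, not part of the original revision: with the enabled branch above
   the non-global TLB revision starts 200 increments and the physical revision 100
   increments below the 64-bit wrap-around point (UINT64_C(0) - N * INCR),
   presumably so the rollover handling paths get exercised soon after VM start;
   the disabled #else branch would instead start a few increments above zero. */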
211
212 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
213 {
214 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
215 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
216
217 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
218#ifndef VBOX_VMM_TARGET_ARMV8
219 pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal = pVCpu->iem.s.DataTlb.uTlbRevisionGlobal = uInitialTlbRevision;
220#endif
221 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
222#ifndef VBOX_VMM_TARGET_ARMV8
223 pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
224 pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
225 pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
226 pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
227#endif
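/* Editor's note (interpretation, not from this file): seeding uFirstTag with
   UINT64_MAX while uLastTag is left at its zero-initialized value presumably
   marks the large-page ranges as empty (first > last), so no tag can match
   until a large page entry is actually loaded. */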
228
229#ifndef VBOX_VMM_TARGET_ARMV8
230 pVCpu->iem.s.cTbsTillNextTimerPoll = 128;
231 pVCpu->iem.s.cTbsTillNextTimerPollPrev = 128;
232#endif
233
234 /*
235 * Host and guest CPU information.
236 */
237 if (idCpu == 0)
238 {
239 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
240 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
241#if !defined(VBOX_VMM_TARGET_ARMV8)
242 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
243 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
244 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
245# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
246 if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
247 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
248 else
249# endif
250 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
251#else
252 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
253 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
254#endif
255
256#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
257 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
258 {
259 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
260 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
261 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
262 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
263 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
264 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
265 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
266 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
267 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
268 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
269 }
270 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
271 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
272 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
273#else
274 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
275 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
276 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
277#endif
278 }
279 else
280 {
281 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
282 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor;
283 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
284 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
285#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
286 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
287#endif
288 }
289
290 /*
291 * Mark all buffers free.
292 */
293 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
294 while (iMemMap-- > 0)
295 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
296
297#ifdef VBOX_WITH_IEM_RECOMPILER
298 /*
299 * Recompiler state and configuration distribution.
300 */
301 pVCpu->iem.s.uRegFpCtrl = IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED;
302 pVCpu->iem.s.uTbNativeRecompileAtUsedCount = uTbNativeRecompileAtUsedCount;
303#endif
304
305#ifdef IEM_WITH_TLB_TRACE
306 /*
307 * Allocate trace buffer.
308 */
309 pVCpu->iem.s.idxTlbTraceEntry = 0;
310 pVCpu->iem.s.cTlbTraceEntriesShift = 16;
311 pVCpu->iem.s.paTlbTraceEntries = (PIEMTLBTRACEENTRY)RTMemPageAlloc( RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift)
312 * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
313 AssertLogRelReturn(pVCpu->iem.s.paTlbTraceEntries, VERR_NO_PAGE_MEMORY);
314#endif
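/* Editor's note, not part of the original revision: with cTlbTraceEntriesShift = 16
   the call above page-allocates RT_BIT_Z(16) = 65536 IEMTLBTRACEENTRY records per
   vCPU; idxTlbTraceEntry starting at zero suggests the buffer is used as a
   wrap-around ring indexed modulo that power of two. */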
315 }
316
317
318#ifdef VBOX_WITH_IEM_RECOMPILER
319 /*
320 * Initialize the TB allocator and cache (/ hash table).
321 *
322 * This is done by each EMT to try get more optimal thread/numa locality of
323 * the allocations.
324 */
325 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
326 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
327 AssertLogRelRCReturn(rc, rc);
328#endif
329
330 /*
331 * Register statistics.
332 */
333 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
334 {
335#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
336 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
337 char szPat[128];
338 RT_NOREF_PV(szPat); /* lazy bird */
339 char szVal[128];
340 RT_NOREF_PV(szVal); /* lazy bird */
341
342 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
343 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
344 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
345 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
346 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
347 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
348 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
349 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
350 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
351 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
352 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
353 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
354 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
355 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
356 STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
357 "Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
358 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
359 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
360 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
361 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
362
363 /* Code TLB: */
364 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
365 "Code TLB non-global revision", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobal", idCpu);
366 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
367 "Code TLB global revision", "/IEM/CPU%u/Tlb/Code/RevisionGlobal", idCpu);
368 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
369 "Code TLB non-global flushes", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobalFlushes", idCpu);
370 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
371 "Code TLB global flushes", "/IEM/CPU%u/Tlb/Code/RevisionGlobalFlushes", idCpu);
372 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
373 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/RevisionRollovers", idCpu);
374
375 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
376 "Code TLB physical revision", "/IEM/CPU%u/Tlb/Code/PhysicalRevision", idCpu);
377 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
378 "Code TLB revision flushes", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionFlushes", idCpu);
379 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
380 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionRollovers", idCpu);
381
382 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
383 "Code TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageGlobalCurLoads", idCpu);
384 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
385 "Code TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalFirstTag", idCpu);
386 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
387 "Code TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalLastTag", idCpu);
388
389 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
390 "Code TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalCurLoads", idCpu);
391 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
392 "Code TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalFirstTag", idCpu);
393 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
394 "Code TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalLastTag", idCpu);
395
396 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
397 "Code TLB page invalidation requests", "/IEM/CPU%u/Tlb/Code/InvlPg", idCpu);
398 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
399 "Code TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeGlobal", idCpu);
400 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
401 "Code TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeNonGlobal", idCpu);
402
403 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
404 "Code TLB misses", "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
405 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
406 "Code TLB global loads", "/IEM/CPU%u/Tlb/Code/Misses/GlobalLoads", idCpu);
407 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
408 "Code TLB slow read path", "/IEM/CPU%u/Tlb/Code/SlowReads", idCpu);
409# ifdef IEM_WITH_TLB_STATISTICS
410 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
411 "Code TLB hits (non-native)", "/IEM/CPU%u/Tlb/Code/Hits/Other", idCpu);
412# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
413 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
414 "Code TLB native hits on new page", "/IEM/CPU%u/Tlb/Code/Hits/New-Page", idCpu);
415 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
416 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/Tlb/Code/Hits/New-Page-With-Offset", idCpu);
417# endif
418
419 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits/*", idCpu);
420 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB hits",
421 "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
422
423 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Hits|/IEM/CPU%u/Tlb/Code/Misses", idCpu, idCpu);
424 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szVal, "Code TLB lookups (sum of hits and misses)",
425 "/IEM/CPU%u/Tlb/Code/AllLookups", idCpu);
426
427 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
428 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
429 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
430 "Code TLB actual miss rate", "/IEM/CPU%u/Tlb/Code/RateMisses", idCpu);
431
432# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
433 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
434 "Code TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
435 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Tag", idCpu);
436 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
437 "Code TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
438 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
439 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
440 "Code TLB misses in native code: Alignment [not directly included grand parent sum]",
441 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Alignment", idCpu);
442 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
443 "Code TLB misses in native code: Cross page [not directly included grand parent sum]",
444 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/CrossPage", idCpu);
445 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
446 "Code TLB misses in native code: Non-canonical [not directly included grand parent sum]",
447 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/NonCanonical", idCpu);
448
449 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
450 "Code TLB native misses on new page",
451 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page", idCpu);
452 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
453 "Code TLB native misses on new page w/ offset",
454 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page-With-Offset", idCpu);
455# endif
456# endif /* IEM_WITH_TLB_STATISTICS */
457
458 /* Data TLB organized as best we can... */
459 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
460 "Data TLB non-global revision", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobal", idCpu);
461 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
462 "Data TLB global revision", "/IEM/CPU%u/Tlb/Data/RevisionGlobal", idCpu);
463 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
464 "Data TLB non-global flushes", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobalFlushes", idCpu);
465 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
466 "Data TLB global flushes", "/IEM/CPU%u/Tlb/Data/RevisionGlobalFlushes", idCpu);
467 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
468 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/RevisionRollovers", idCpu);
469
470 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
471 "Data TLB physical revision", "/IEM/CPU%u/Tlb/Data/PhysicalRevision", idCpu);
472 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
473 "Data TLB revision flushes", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionFlushes", idCpu);
474 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
475 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionRollovers", idCpu);
476
477 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
478 "Data TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageGlobalCurLoads", idCpu);
479 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
480 "Data TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalFirstTag", idCpu);
481 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
482 "Data TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalLastTag", idCpu);
483
484 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
485 "Data TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalCurLoads", idCpu);
486 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
487 "Data TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalFirstTag", idCpu);
488 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
489 "Data TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalLastTag", idCpu);
490
491 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
492 "Data TLB page invalidation requests", "/IEM/CPU%u/Tlb/Data/InvlPg", idCpu);
493 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
494 "Data TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeGlobal", idCpu);
495 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
496 "Data TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeNonGlobal", idCpu);
497
498 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
499 "Data TLB core misses (iemMemMap, direct iemMemMapJmp (not safe path))",
500 "/IEM/CPU%u/Tlb/Data/Misses/Core", idCpu);
501 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
502 "Data TLB global loads",
503 "/IEM/CPU%u/Tlb/Data/Misses/Core/GlobalLoads", idCpu);
504 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
505 "Data TLB safe read path (inline/native misses going to iemMemMapJmp)",
506 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Reads", idCpu);
507 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
508 "Data TLB safe write path (inline/native misses going to iemMemMapJmp)",
509 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Writes", idCpu);
510 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/*", idCpu);
511 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB misses",
512 "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
513
514 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/Safe/*", idCpu);
515 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB actual safe path calls (read + write)",
516 "/IEM/CPU%u/Tlb/Data/Misses/Safe", idCpu);
517 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
518 "Data TLB hits in iemMemMapJmp - not part of safe-path total",
519 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartHits", idCpu);
520 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
521 "Data TLB misses in iemMemMapJmp - not part of safe-path total",
522 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses", idCpu);
523 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
524 "Data TLB global loads",
525 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses/GlobalLoads", idCpu);
526
527# ifdef IEM_WITH_TLB_STATISTICS
528# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
529 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
530 "Data TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
531 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Tag", idCpu);
532 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
533 "Data TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
534 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
535 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
536 "Data TLB misses in native code: Alignment [not directly included grand parent sum]",
537 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Alignment", idCpu);
538 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
539 "Data TLB misses in native code: Cross page [not directly included grand parent sum]",
540 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/CrossPage", idCpu);
541 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
542 "Data TLB misses in native code: Non-canonical [not directly included grand parent sum]",
543 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/NonCanonical", idCpu);
544# endif
545# endif
546
547# ifdef IEM_WITH_TLB_STATISTICS
548 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
549 "Data TLB core hits (iemMemMap, direct iemMemMapJmp (not safe path))",
550 "/IEM/CPU%u/Tlb/Data/Hits/Core", idCpu);
551 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInlineCodeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
552 "Data TLB hits in IEMAllMemRWTmplInline.cpp.h",
553 "/IEM/CPU%u/Tlb/Data/Hits/Inline", idCpu);
554# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
555 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
556 "Data TLB native stack access hits",
557 "/IEM/CPU%u/Tlb/Data/Hits/Native/Stack", idCpu);
558 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
559 "Data TLB native data fetch hits",
560 "/IEM/CPU%u/Tlb/Data/Hits/Native/Fetch", idCpu);
561 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
562 "Data TLB native data store hits",
563 "/IEM/CPU%u/Tlb/Data/Hits/Native/Store", idCpu);
564 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
565 "Data TLB native mapped data hits",
566 "/IEM/CPU%u/Tlb/Data/Hits/Native/Mapped", idCpu);
567# endif
568 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/*", idCpu);
569 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits",
570 "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
571
572# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
573 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/Native/*", idCpu);
574 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits from native code",
575 "/IEM/CPU%u/Tlb/Data/Hits/Native", idCpu);
576# endif
577
578 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Hits|/IEM/CPU%u/Tlb/Data/Misses", idCpu, idCpu);
579 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szVal, "Data TLB lookups (sum of hits and misses)",
580 "/IEM/CPU%u/Tlb/Data/AllLookups", idCpu);
581
582 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
583 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
584 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
585 "Data TLB actual miss rate", "/IEM/CPU%u/Tlb/Data/RateMisses", idCpu);
586
587# endif /* IEM_WITH_TLB_STATISTICS */
588
589
590#ifdef VBOX_WITH_IEM_RECOMPILER
591 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
592 "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
593 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
594 "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
595 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
596 "Times threaded TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecThreadedBreaks", idCpu);
597# ifdef VBOX_WITH_STATISTICS
598 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
599 "Times threaded TB execution was interrupted/broken off on a call with lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithLookup", idCpu);
600 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithoutLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
601 "Times threaded TB execution was interrupted/broken off on a call without lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithoutLookup", idCpu);
602# endif
603
604# ifdef VBOX_WITH_STATISTICS
605 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPoll, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
606 "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll", idCpu);
607 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollRun, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
608 "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll/Running", idCpu);
609 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollUnchanged, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
610 "Timer polling interval unchanged", "/IEM/CPU%u/re/TimerPoll/Unchanged", idCpu);
611 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollTiny, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
612 "Timer polling interval tiny", "/IEM/CPU%u/re/TimerPoll/Tiny", idCpu);
613 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollDefaultCalc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
614 "Timer polling interval calculated using defaults", "/IEM/CPU%u/re/TimerPoll/DefaultCalc", idCpu);
615 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollMax, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
616 "Timer polling interval maxed out", "/IEM/CPU%u/re/TimerPoll/Max", idCpu);
617 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorDivision, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE,
618 "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorDivision", idCpu);
619 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorMultiplication, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
620 "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorMultiplication", idCpu);
621# endif
622 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbsTillNextTimerPollPrev, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
623 "Timer polling interval (in TBs)", "/IEM/CPU%u/re/TimerPollInterval", idCpu);
624
625 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
626 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
627 "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
628 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
629 "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
630# ifdef VBOX_WITH_STATISTICS
631 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
632 "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
633# endif
634 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
635 "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/ExecMem/TbPruningNative", idCpu);
636 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
637 "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
638 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
639 "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
640 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
641 "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
642 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
643 "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
644 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
645 "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
646 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
647 "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
648 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
649 "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);
650
651 PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
652 STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
653 "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);
654
655 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
656 "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
657 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHitsViaTbLookupTable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
658 "Translation block lookup hits via TB lookup table associated with the previous TB", "/IEM/CPU%u/re/cTbLookupHitsViaTbLookupTable", idCpu);
659 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
660 "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
661 STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
662 "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
663# ifdef VBOX_WITH_STATISTICS
664 STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
665 "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
666# endif
667
668 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
669 "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
670 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
671 "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
672 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLookupEntries, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
673 "TB lookup table entries per threaded translation block", "/IEM/CPU%u/re/ThrdLookupEntriesPerTb", idCpu);
674
675 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
676 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
677 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckTimersBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
678 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckTimersBreaks", idCpu);
679 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
680 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
681 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
682 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
683 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
684 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
685
686 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
687 "Detected loop full TB", "/IEM/CPU%u/re/LoopFullTbDetected", idCpu);
688 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected2, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
689 "Detected loop full TB but looping back to before the first TB instruction",
690 "/IEM/CPU%u/re/LoopFullTbDetected2", idCpu);
691 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopInTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
692 "Detected loop within TB", "/IEM/CPU%u/re/LoopInTbDetected", idCpu);
693
694 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeExecMemInstrBufAllocFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
695 "Number of times the exec memory allocator failed to allocate a large enough buffer",
696 "/IEM/CPU%u/re/NativeExecMemInstrBufAllocFailed", idCpu);
697
698 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
699 "Number of threaded calls per TB that have been properly recompiled to native code",
700 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
701 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
702 "Number of threaded calls per TB that could not be recompiler to native code",
703 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
704 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
705 "Number of threaded calls that could not be recompiler to native code",
706 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
707
708 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
709 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
710 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
711 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
712
713# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
714# ifdef VBOX_WITH_STATISTICS
715 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
716 "Number of calls to iemNativeRegAllocFindFree.",
717 "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
718# endif
719 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
720 "Number of times iemNativeRegAllocFindFree needed to free a variable.",
721 "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
722# ifdef VBOX_WITH_STATISTICS
723 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
724 "Number of times iemNativeRegAllocFindFree did not needed to free any variables.",
725 "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
726 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
727 "Times liveness info freeed up shadowed guest registers in iemNativeRegAllocFindFree.",
728 "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
729 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
730 "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
731 "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
732
733# define REG_NATIVE_EFL_GROUP(a_Lower, a_Camel) do { \
734 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponed ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
735 "Postponed all status flag updating, " #a_Lower " instructions", \
736 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
737 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkipped ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
738 "Skipped all status flag updating, " #a_Lower " instructions", \
739 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
740 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflTotal ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
741 "Total number of " #a_Lower " intructions with status flag updating", \
742 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
743 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
744 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
745 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
746 "Postponed all status flag updating, " #a_Lower " instructions, percentage", \
747 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "PostponedPct", idCpu); \
748 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
749 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
750 "Skipped all status flag updating, " #a_Lower " instructions, percentage", \
751 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "SkippedPct", idCpu); \
752 } while (0)
753 REG_NATIVE_EFL_GROUP(arithmetic, Arithmetic);
754 REG_NATIVE_EFL_GROUP(logical, Logical);
755 REG_NATIVE_EFL_GROUP(shift, Shift);
756# undef REG_NATIVE_EFL_GROUP
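/* Editor's note, not part of the original revision: each REG_NATIVE_EFL_GROUP
   expansion above registers three counters and two ratios, e.g. the
   (arithmetic, Arithmetic) instance creates .../NativeEFlags/ArithmeticPostponed,
   .../ArithmeticSkipped and .../ArithmeticTotal, plus ArithmeticPostponedPct and
   ArithmeticSkippedPct expressed relative to the Total counter. */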
757
758 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponedEmits, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
759 "Postponed EFLAGS calculation emits", "/IEM/CPU%u/re/NativeEFlags/ZZEmits", idCpu);
760
761 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
762 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippable", idCpu);
763 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippable", idCpu);
764 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippable", idCpu);
765 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippable", idCpu);
766 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippable", idCpu);
767
768 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfRequired", idCpu);
769 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfRequired", idCpu);
770 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfRequired", idCpu);
771 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfRequired", idCpu);
772 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfRequired", idCpu);
773 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfRequired", idCpu);
774
775# ifdef IEMLIVENESS_EXTENDED_LAYOUT
776 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfDelayable", idCpu);
777 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfDelayable", idCpu);
778 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfDelayable", idCpu);
779 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfDelayable", idCpu);
780 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfDelayable", idCpu);
781 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfDelayable", idCpu);
782# endif
783
784 /* Sum up all status bits ('_' is a sorting hack). */
785 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fSkippable*", idCpu);
786 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total skippable EFLAGS status bit updating",
787 "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);
788
789 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fRequired*", idCpu);
790 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total required EFLAGS status bit updating",
791 "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);
792
793# ifdef IEMLIVENESS_EXTENDED_LAYOUT
794 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fDelayable*", idCpu);
795 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total potentially delayable EFLAGS status bit updating",
796 "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
797# endif
798
799 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?f*", idCpu);
800 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total EFLAGS status bit events of any kind",
801 "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
802
803 /* Corresponding ratios / percentages of the totals. */
804 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
805 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);
806 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
807 "Total skippable EFLAGS status bit updating percentage",
808 "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippablePct", idCpu);
809
810 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
811 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);
812 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
813 "Total required EFLAGS status bit updating percentage",
814 "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequiredPct", idCpu);
815
816# ifdef IEMLIVENESS_EXTENDED_LAYOUT
817 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
818 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
819 "Total potentially delayable EFLAGS status bit updating percentage",
820 "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayablePct", idCpu);
821# endif
822
823 /* Ratios of individual bits. */
824 size_t const offFlagChar = RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/Cf*", idCpu) - 3;
825 Assert(szPat[offFlagChar] == 'C');
826 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
827 Assert(szVal[offFlagChar] == 'C');
828 szPat[offFlagChar] = szVal[offFlagChar] = 'C'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.CF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippablePct", idCpu);
829 szPat[offFlagChar] = szVal[offFlagChar] = 'P'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.PF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippablePct", idCpu);
830 szPat[offFlagChar] = szVal[offFlagChar] = 'A'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.AF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippablePct", idCpu);
831 szPat[offFlagChar] = szVal[offFlagChar] = 'Z'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.ZF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippablePct", idCpu);
832 szPat[offFlagChar] = szVal[offFlagChar] = 'S'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.SF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippablePct", idCpu);
833 szPat[offFlagChar] = szVal[offFlagChar] = 'O'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.OF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippablePct", idCpu);
834
835 /* PC updates total and skipped, with PCT ratio. */
836 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates", "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
837 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
838 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
839 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
840 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
841 "Delayed RIP updating percentage",
842 "/IEM/CPU%u/re/NativePcUpdateDelayed_StatusDelayedPct", idCpu);
843
844# endif /* VBOX_WITH_STATISTICS */
845# ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
846 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEndIfOtherBranchDirty, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
847 "IEM_MC_ENDIF flushing dirty shadow registers for other branch (not good).",
848 "/IEM/CPU%u/re/NativeEndIfOtherBranchDirty", idCpu);
849# endif
850# ifdef VBOX_WITH_STATISTICS
851# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
852 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
853 "Number of calls to iemNativeSimdRegAllocFindFree.",
854 "/IEM/CPU%u/re/NativeSimdRegFindFree", idCpu);
855 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
856 "Number of times iemNativeSimdRegAllocFindFree needed to free a variable.",
857 "/IEM/CPU%u/re/NativeSimdRegFindFreeVar", idCpu);
858 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
859                                 "Number of times iemNativeSimdRegAllocFindFree did not need to free any variables.",
860 "/IEM/CPU%u/re/NativeSimdRegFindFreeNoVar", idCpu);
861 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
862                                 "Times liveness info freed up shadowed guest registers in iemNativeSimdRegAllocFindFree.",
863 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessUnshadowed", idCpu);
864 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
865                                 "Times liveness info helped find the return register in iemNativeSimdRegAllocFindFree.",
866 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessHelped", idCpu);
867
868 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks",
869 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckPotential", idCpu);
870 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks",
871 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckPotential", idCpu);
872 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks",
873 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckPotential", idCpu);
874 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks",
875 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckPotential", idCpu);
876
877 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted",
878 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckOmitted", idCpu);
879 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted",
880 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckOmitted", idCpu);
881 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted",
882 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckOmitted", idCpu);
883 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted",
884 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckOmitted", idCpu);
885# endif
886
887 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbFinished, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
888 "Number of times the TB finishes execution completely",
889 "/IEM/CPU%u/re/NativeTbFinished", idCpu);
890# endif /* VBOX_WITH_STATISTICS */
891 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreak, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
892 "Number of times the TB finished through the ReturnBreak label",
893 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak", idCpu);
894 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreakFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
895                             "Number of times the TB finished through the ReturnBreakFF label",
896 "/IEM/CPU%u/re/NativeTbExit/ReturnBreakFF", idCpu);
897 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnWithFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
898 "Number of times the TB finished through the ReturnWithFlags label",
899 "/IEM/CPU%u/re/NativeTbExit/ReturnWithFlags", idCpu);
900 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnOtherStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
901 "Number of times the TB finished with some other status value",
902 "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus", idCpu);
903 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLongJump, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
904 "Number of times the TB finished via long jump / throw",
905 "/IEM/CPU%u/re/NativeTbExit/LongJumps", idCpu);
906 /* These end up returning VINF_IEM_REEXEC_BREAK and are thus already counted under NativeTbExit/ReturnBreak: */
907 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitObsoleteTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
908 "Number of times the TB finished through the ObsoleteTb label",
909 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/ObsoleteTb", idCpu);
910 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
911 "Number of times the TB finished through the NeedCsLimChecking label",
912 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/NeedCsLimChecking", idCpu);
913 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
914 "Number of times the TB finished through the CheckBranchMiss label",
915 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/CheckBranchMiss", idCpu);
916 /* Raising stuff will either increment NativeTbExit/LongJumps or NativeTbExit/ReturnOtherStatus
917 depending on whether VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is defined: */
918# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
919# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus/"
920# else
921# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/LongJumps/"
922# endif
923 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseDe, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
924 "Number of times the TB finished raising a #DE exception",
925 RAISE_PREFIX "RaiseDe", idCpu);
926 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
927 "Number of times the TB finished raising a #UD exception",
928 RAISE_PREFIX "RaiseUd", idCpu);
929 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
930                             "Number of times the TB finished raising an SSE related exception",
931 RAISE_PREFIX "RaiseSseRelated", idCpu);
932 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseAvxRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
933                             "Number of times the TB finished raising an AVX related exception",
934 RAISE_PREFIX "RaiseAvxRelated", idCpu);
935 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseAvxFpRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
936                             "Number of times the TB finished raising an SSE/AVX floating point related exception",
937 RAISE_PREFIX "RaiseSseAvxFpRelated", idCpu);
938 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseNm, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
939 "Number of times the TB finished raising a #NM exception",
940 RAISE_PREFIX "RaiseNm", idCpu);
941 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseGp0, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
942 "Number of times the TB finished raising a #GP(0) exception",
943 RAISE_PREFIX "RaiseGp0", idCpu);
944 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseMf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
945 "Number of times the TB finished raising a #MF exception",
946 RAISE_PREFIX "RaiseMf", idCpu);
947 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseXf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
948 "Number of times the TB finished raising a #XF exception",
949 RAISE_PREFIX "RaiseXf", idCpu);
950
951# ifdef VBOX_WITH_STATISTICS
952 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLoopFullTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
953 "Number of full TB loops.",
954 "/IEM/CPU%u/re/NativeTbExit/LoopFullTb", idCpu);
955# endif
956
957 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
958 "Direct linking #1 with IRQ check succeeded",
959 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1Irq", idCpu);
960 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
961 "Direct linking #1 w/o IRQ check succeeded",
962 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1NoIrq", idCpu);
963# ifdef VBOX_WITH_STATISTICS
964 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
965 "Direct linking #1 failed: No TB in lookup table",
966 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1NoTb", idCpu);
967 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
968 "Direct linking #1 failed: GCPhysPc mismatch",
969 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchGCPhysPc", idCpu);
970 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
971 "Direct linking #1 failed: TB flags mismatch",
972 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchFlags", idCpu);
973 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
974 "Direct linking #1 failed: IRQ or FF pending",
975 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1PendingIrq", idCpu);
976# endif
977
978 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
979 "Direct linking #2 with IRQ check succeeded",
980 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2Irq", idCpu);
981 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
982 "Direct linking #2 w/o IRQ check succeeded",
983 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2NoIrq", idCpu);
984# ifdef VBOX_WITH_STATISTICS
985 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
986 "Direct linking #2 failed: No TB in lookup table",
987 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2NoTb", idCpu);
988 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
989 "Direct linking #2 failed: GCPhysPc mismatch",
990 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchGCPhysPc", idCpu);
991 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
992 "Direct linking #2 failed: TB flags mismatch",
993 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchFlags", idCpu);
994 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
995 "Direct linking #2 failed: IRQ or FF pending",
996 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2PendingIrq", idCpu);
997# endif
998
999 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeTbExit/*", idCpu); /* only immediate children, no sub folders */
1000 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
1001 "Number of times native TB execution finished before the end (not counting thrown memory++ exceptions)",
1002 "/IEM/CPU%u/re/NativeTbExit", idCpu);
1003
1004
1005# endif /* VBOX_WITH_IEM_NATIVE_RECOMPILER */
1006
1007
1008# ifdef VBOX_WITH_STATISTICS
1009 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1010 "iemMemMapJmp calls", "/IEM/CPU%u/iemMemMapJmp", idCpu);
1011 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapNoJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1012 "iemMemMap calls", "/IEM/CPU%u/iemMemMapNoJmp", idCpu);
1013 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferCrossPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1014 "iemMemBounceBufferMapCrossPage calls", "/IEM/CPU%u/iemMemMapBounceBufferCrossPage", idCpu);
1015 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferMapPhys, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1016 "iemMemBounceBufferMapPhys calls", "/IEM/CPU%u/iemMemMapBounceBufferMapPhys", idCpu);
1017# endif
1018
1019
1020#endif /* VBOX_WITH_IEM_RECOMPILER */
1021
1022 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
1023 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1024 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
1025 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
1026 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1027 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
1028
1029# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
1030 /* Instruction statistics: */
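        /* IEMInstructionStatisticsTmpl.h expands IEM_DO_INSTR_STAT once per instruction, registering an RZ and an R3 sample for each. */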
1031# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
1032 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1033 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
1034 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1035 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
1036# include "IEMInstructionStatisticsTmpl.h"
1037# undef IEM_DO_INSTR_STAT
1038# endif
1039
1040# if defined(VBOX_WITH_STATISTICS) && defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1041 /* Threaded function statistics: */
1042 for (unsigned i = 1; i < (unsigned)kIemThreadedFunc_End; i++)
1043 STAMR3RegisterF(pVM, &pVCpu->iem.s.acThreadedFuncStats[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED,
1044 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/ThrdFuncs/%s", idCpu, g_apszIemThreadedFunctionStats[i]);
1045# endif
1046
1047#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
1048 }
1049
1050#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1051 /*
1052 * Register the per-VM VMX APIC-access page handler type.
1053 */
1054 if (pVM->cpum.ro.GuestFeatures.fVmx)
1055 {
1056 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
1057 iemVmxApicAccessPageHandler,
1058 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
1059 AssertLogRelRCReturn(rc, rc);
1060 }
1061#endif
1062
1063 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1064    DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM data TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1065#ifdef IEM_WITH_TLB_TRACE
1066 DBGFR3InfoRegisterInternalArgv(pVM, "tlbtrace", "IEM TLB trace log", iemR3InfoTlbTrace, DBGFINFO_FLAGS_RUN_ON_EMT);
1067#endif
1068#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1069 DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
1070 DBGFR3InfoRegisterInternalArgv(pVM, "tbtop", "IEM translation blocks most used or most recently used",
1071 iemR3InfoTbTop, DBGFINFO_FLAGS_RUN_ON_EMT);
1072#endif
1073#ifdef VBOX_WITH_DEBUGGER
1074 iemR3RegisterDebuggerCommands();
1075#endif
1076
1077 return VINF_SUCCESS;
1078}
1079
1080
1081VMMR3DECL(int) IEMR3Term(PVM pVM)
1082{
1083 NOREF(pVM);
1084#ifdef IEM_WITH_TLB_TRACE
1085 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1086 {
1087 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
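        /* Free the TLB trace ring buffer; it holds RT_BIT_Z(cTlbTraceEntriesShift) entries. */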
1088 RTMemPageFree(pVCpu->iem.s.paTlbTraceEntries,
1089 RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift) * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
1090 }
1091#endif
1092 return VINF_SUCCESS;
1093}
1094
1095
1096VMMR3DECL(void) IEMR3Relocate(PVM pVM)
1097{
1098 RT_NOREF(pVM);
1099}
1100
1101
1102/**
1103 * Gets the name of a generic IEM exit code.
1104 *
1105 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
1106 * @param uExit The IEM exit to name.
1107 */
1108VMMR3DECL(const char *) IEMR3GetExitName(uint32_t uExit)
1109{
1110 static const char * const s_apszNames[] =
1111 {
1112 /* external interrupts */
1113 "ExtInt 00h", "ExtInt 01h", "ExtInt 02h", "ExtInt 03h", "ExtInt 04h", "ExtInt 05h", "ExtInt 06h", "ExtInt 07h",
1114 "ExtInt 08h", "ExtInt 09h", "ExtInt 0ah", "ExtInt 0bh", "ExtInt 0ch", "ExtInt 0dh", "ExtInt 0eh", "ExtInt 0fh",
1115 "ExtInt 10h", "ExtInt 11h", "ExtInt 12h", "ExtInt 13h", "ExtInt 14h", "ExtInt 15h", "ExtInt 16h", "ExtInt 17h",
1116 "ExtInt 18h", "ExtInt 19h", "ExtInt 1ah", "ExtInt 1bh", "ExtInt 1ch", "ExtInt 1dh", "ExtInt 1eh", "ExtInt 1fh",
1117 "ExtInt 20h", "ExtInt 21h", "ExtInt 22h", "ExtInt 23h", "ExtInt 24h", "ExtInt 25h", "ExtInt 26h", "ExtInt 27h",
1118 "ExtInt 28h", "ExtInt 29h", "ExtInt 2ah", "ExtInt 2bh", "ExtInt 2ch", "ExtInt 2dh", "ExtInt 2eh", "ExtInt 2fh",
1119 "ExtInt 30h", "ExtInt 31h", "ExtInt 32h", "ExtInt 33h", "ExtInt 34h", "ExtInt 35h", "ExtInt 36h", "ExtInt 37h",
1120 "ExtInt 38h", "ExtInt 39h", "ExtInt 3ah", "ExtInt 3bh", "ExtInt 3ch", "ExtInt 3dh", "ExtInt 3eh", "ExtInt 3fh",
1121 "ExtInt 40h", "ExtInt 41h", "ExtInt 42h", "ExtInt 43h", "ExtInt 44h", "ExtInt 45h", "ExtInt 46h", "ExtInt 47h",
1122 "ExtInt 48h", "ExtInt 49h", "ExtInt 4ah", "ExtInt 4bh", "ExtInt 4ch", "ExtInt 4dh", "ExtInt 4eh", "ExtInt 4fh",
1123 "ExtInt 50h", "ExtInt 51h", "ExtInt 52h", "ExtInt 53h", "ExtInt 54h", "ExtInt 55h", "ExtInt 56h", "ExtInt 57h",
1124 "ExtInt 58h", "ExtInt 59h", "ExtInt 5ah", "ExtInt 5bh", "ExtInt 5ch", "ExtInt 5dh", "ExtInt 5eh", "ExtInt 5fh",
1125 "ExtInt 60h", "ExtInt 61h", "ExtInt 62h", "ExtInt 63h", "ExtInt 64h", "ExtInt 65h", "ExtInt 66h", "ExtInt 67h",
1126 "ExtInt 68h", "ExtInt 69h", "ExtInt 6ah", "ExtInt 6bh", "ExtInt 6ch", "ExtInt 6dh", "ExtInt 6eh", "ExtInt 6fh",
1127 "ExtInt 70h", "ExtInt 71h", "ExtInt 72h", "ExtInt 73h", "ExtInt 74h", "ExtInt 75h", "ExtInt 76h", "ExtInt 77h",
1128 "ExtInt 78h", "ExtInt 79h", "ExtInt 7ah", "ExtInt 7bh", "ExtInt 7ch", "ExtInt 7dh", "ExtInt 7eh", "ExtInt 7fh",
1129 "ExtInt 80h", "ExtInt 81h", "ExtInt 82h", "ExtInt 83h", "ExtInt 84h", "ExtInt 85h", "ExtInt 86h", "ExtInt 87h",
1130 "ExtInt 88h", "ExtInt 89h", "ExtInt 8ah", "ExtInt 8bh", "ExtInt 8ch", "ExtInt 8dh", "ExtInt 8eh", "ExtInt 8fh",
1131 "ExtInt 90h", "ExtInt 91h", "ExtInt 92h", "ExtInt 93h", "ExtInt 94h", "ExtInt 95h", "ExtInt 96h", "ExtInt 97h",
1132 "ExtInt 98h", "ExtInt 99h", "ExtInt 9ah", "ExtInt 9bh", "ExtInt 9ch", "ExtInt 9dh", "ExtInt 9eh", "ExtInt 9fh",
1133 "ExtInt a0h", "ExtInt a1h", "ExtInt a2h", "ExtInt a3h", "ExtInt a4h", "ExtInt a5h", "ExtInt a6h", "ExtInt a7h",
1134 "ExtInt a8h", "ExtInt a9h", "ExtInt aah", "ExtInt abh", "ExtInt ach", "ExtInt adh", "ExtInt aeh", "ExtInt afh",
1135 "ExtInt b0h", "ExtInt b1h", "ExtInt b2h", "ExtInt b3h", "ExtInt b4h", "ExtInt b5h", "ExtInt b6h", "ExtInt b7h",
1136 "ExtInt b8h", "ExtInt b9h", "ExtInt bah", "ExtInt bbh", "ExtInt bch", "ExtInt bdh", "ExtInt beh", "ExtInt bfh",
1137 "ExtInt c0h", "ExtInt c1h", "ExtInt c2h", "ExtInt c3h", "ExtInt c4h", "ExtInt c5h", "ExtInt c6h", "ExtInt c7h",
1138 "ExtInt c8h", "ExtInt c9h", "ExtInt cah", "ExtInt cbh", "ExtInt cch", "ExtInt cdh", "ExtInt ceh", "ExtInt cfh",
1139 "ExtInt d0h", "ExtInt d1h", "ExtInt d2h", "ExtInt d3h", "ExtInt d4h", "ExtInt d5h", "ExtInt d6h", "ExtInt d7h",
1140 "ExtInt d8h", "ExtInt d9h", "ExtInt dah", "ExtInt dbh", "ExtInt dch", "ExtInt ddh", "ExtInt deh", "ExtInt dfh",
1141 "ExtInt e0h", "ExtInt e1h", "ExtInt e2h", "ExtInt e3h", "ExtInt e4h", "ExtInt e5h", "ExtInt e6h", "ExtInt e7h",
1142 "ExtInt e8h", "ExtInt e9h", "ExtInt eah", "ExtInt ebh", "ExtInt ech", "ExtInt edh", "ExtInt eeh", "ExtInt efh",
1143 "ExtInt f0h", "ExtInt f1h", "ExtInt f2h", "ExtInt f3h", "ExtInt f4h", "ExtInt f5h", "ExtInt f6h", "ExtInt f7h",
1144 "ExtInt f8h", "ExtInt f9h", "ExtInt fah", "ExtInt fbh", "ExtInt fch", "ExtInt fdh", "ExtInt feh", "ExtInt ffh",
1145        /* software interrupts */
1146 "SoftInt 00h", "SoftInt 01h", "SoftInt 02h", "SoftInt 03h", "SoftInt 04h", "SoftInt 05h", "SoftInt 06h", "SoftInt 07h",
1147 "SoftInt 08h", "SoftInt 09h", "SoftInt 0ah", "SoftInt 0bh", "SoftInt 0ch", "SoftInt 0dh", "SoftInt 0eh", "SoftInt 0fh",
1148 "SoftInt 10h", "SoftInt 11h", "SoftInt 12h", "SoftInt 13h", "SoftInt 14h", "SoftInt 15h", "SoftInt 16h", "SoftInt 17h",
1149 "SoftInt 18h", "SoftInt 19h", "SoftInt 1ah", "SoftInt 1bh", "SoftInt 1ch", "SoftInt 1dh", "SoftInt 1eh", "SoftInt 1fh",
1150 "SoftInt 20h", "SoftInt 21h", "SoftInt 22h", "SoftInt 23h", "SoftInt 24h", "SoftInt 25h", "SoftInt 26h", "SoftInt 27h",
1151 "SoftInt 28h", "SoftInt 29h", "SoftInt 2ah", "SoftInt 2bh", "SoftInt 2ch", "SoftInt 2dh", "SoftInt 2eh", "SoftInt 2fh",
1152 "SoftInt 30h", "SoftInt 31h", "SoftInt 32h", "SoftInt 33h", "SoftInt 34h", "SoftInt 35h", "SoftInt 36h", "SoftInt 37h",
1153 "SoftInt 38h", "SoftInt 39h", "SoftInt 3ah", "SoftInt 3bh", "SoftInt 3ch", "SoftInt 3dh", "SoftInt 3eh", "SoftInt 3fh",
1154 "SoftInt 40h", "SoftInt 41h", "SoftInt 42h", "SoftInt 43h", "SoftInt 44h", "SoftInt 45h", "SoftInt 46h", "SoftInt 47h",
1155 "SoftInt 48h", "SoftInt 49h", "SoftInt 4ah", "SoftInt 4bh", "SoftInt 4ch", "SoftInt 4dh", "SoftInt 4eh", "SoftInt 4fh",
1156 "SoftInt 50h", "SoftInt 51h", "SoftInt 52h", "SoftInt 53h", "SoftInt 54h", "SoftInt 55h", "SoftInt 56h", "SoftInt 57h",
1157 "SoftInt 58h", "SoftInt 59h", "SoftInt 5ah", "SoftInt 5bh", "SoftInt 5ch", "SoftInt 5dh", "SoftInt 5eh", "SoftInt 5fh",
1158 "SoftInt 60h", "SoftInt 61h", "SoftInt 62h", "SoftInt 63h", "SoftInt 64h", "SoftInt 65h", "SoftInt 66h", "SoftInt 67h",
1159 "SoftInt 68h", "SoftInt 69h", "SoftInt 6ah", "SoftInt 6bh", "SoftInt 6ch", "SoftInt 6dh", "SoftInt 6eh", "SoftInt 6fh",
1160 "SoftInt 70h", "SoftInt 71h", "SoftInt 72h", "SoftInt 73h", "SoftInt 74h", "SoftInt 75h", "SoftInt 76h", "SoftInt 77h",
1161 "SoftInt 78h", "SoftInt 79h", "SoftInt 7ah", "SoftInt 7bh", "SoftInt 7ch", "SoftInt 7dh", "SoftInt 7eh", "SoftInt 7fh",
1162 "SoftInt 80h", "SoftInt 81h", "SoftInt 82h", "SoftInt 83h", "SoftInt 84h", "SoftInt 85h", "SoftInt 86h", "SoftInt 87h",
1163 "SoftInt 88h", "SoftInt 89h", "SoftInt 8ah", "SoftInt 8bh", "SoftInt 8ch", "SoftInt 8dh", "SoftInt 8eh", "SoftInt 8fh",
1164 "SoftInt 90h", "SoftInt 91h", "SoftInt 92h", "SoftInt 93h", "SoftInt 94h", "SoftInt 95h", "SoftInt 96h", "SoftInt 97h",
1165 "SoftInt 98h", "SoftInt 99h", "SoftInt 9ah", "SoftInt 9bh", "SoftInt 9ch", "SoftInt 9dh", "SoftInt 9eh", "SoftInt 9fh",
1166 "SoftInt a0h", "SoftInt a1h", "SoftInt a2h", "SoftInt a3h", "SoftInt a4h", "SoftInt a5h", "SoftInt a6h", "SoftInt a7h",
1167 "SoftInt a8h", "SoftInt a9h", "SoftInt aah", "SoftInt abh", "SoftInt ach", "SoftInt adh", "SoftInt aeh", "SoftInt afh",
1168 "SoftInt b0h", "SoftInt b1h", "SoftInt b2h", "SoftInt b3h", "SoftInt b4h", "SoftInt b5h", "SoftInt b6h", "SoftInt b7h",
1169 "SoftInt b8h", "SoftInt b9h", "SoftInt bah", "SoftInt bbh", "SoftInt bch", "SoftInt bdh", "SoftInt beh", "SoftInt bfh",
1170 "SoftInt c0h", "SoftInt c1h", "SoftInt c2h", "SoftInt c3h", "SoftInt c4h", "SoftInt c5h", "SoftInt c6h", "SoftInt c7h",
1171 "SoftInt c8h", "SoftInt c9h", "SoftInt cah", "SoftInt cbh", "SoftInt cch", "SoftInt cdh", "SoftInt ceh", "SoftInt cfh",
1172 "SoftInt d0h", "SoftInt d1h", "SoftInt d2h", "SoftInt d3h", "SoftInt d4h", "SoftInt d5h", "SoftInt d6h", "SoftInt d7h",
1173 "SoftInt d8h", "SoftInt d9h", "SoftInt dah", "SoftInt dbh", "SoftInt dch", "SoftInt ddh", "SoftInt deh", "SoftInt dfh",
1174 "SoftInt e0h", "SoftInt e1h", "SoftInt e2h", "SoftInt e3h", "SoftInt e4h", "SoftInt e5h", "SoftInt e6h", "SoftInt e7h",
1175 "SoftInt e8h", "SoftInt e9h", "SoftInt eah", "SoftInt ebh", "SoftInt ech", "SoftInt edh", "SoftInt eeh", "SoftInt efh",
1176 "SoftInt f0h", "SoftInt f1h", "SoftInt f2h", "SoftInt f3h", "SoftInt f4h", "SoftInt f5h", "SoftInt f6h", "SoftInt f7h",
1177 "SoftInt f8h", "SoftInt f9h", "SoftInt fah", "SoftInt fbh", "SoftInt fch", "SoftInt fdh", "SoftInt feh", "SoftInt ffh",
1178 };
1179 if (uExit < RT_ELEMENTS(s_apszNames))
1180 return s_apszNames[uExit];
1181 return NULL;
1182}
1183
1184
1185/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1186static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
1187{
1188 if (*pfHeader)
1189 return;
1190 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
1191 *pfHeader = true;
1192}
1193
1194
1195 #define IEMR3INFOTLB_F_ONLY_VALID   RT_BIT_32(0)  /**< Only print entries whose tag matches the current TLB revision. */
1196 #define IEMR3INFOTLB_F_CHECK        RT_BIT_32(1)  /**< Cross-check printed entries against the guest page tables. */
1197
1198/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1199static void iemR3InfoTlbPrintSlot(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe,
1200 uint32_t uSlot, uint32_t fFlags)
1201{
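    /* On x86 the TLB interleaves non-global (even slot) and global (odd slot) entries, each validated against its own revision counter. */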
1202#ifndef VBOX_VMM_TARGET_ARMV8
1203 uint64_t const uTlbRevision = !(uSlot & 1) ? pTlb->uTlbRevision : pTlb->uTlbRevisionGlobal;
1204#else
1205 uint64_t const uTlbRevision = pTlb->uTlbRevision;
1206#endif
1207 if ((fFlags & IEMR3INFOTLB_F_ONLY_VALID) && (pTlbe->uTag & IEMTLB_REVISION_MASK) != uTlbRevision)
1208 return;
1209
1210    /* The address needs to be sign extended, thus the shifting fun here: shift the tag up so its top address bit lands in bit 63, then arithmetic-shift it back down into a page-aligned guest pointer. */
1211 RTGCPTR const GCPtr = (RTGCINTPTR)((pTlbe->uTag & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1212 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT);
1213 const char *pszValid = "";
1214#ifndef VBOX_VMM_TARGET_ARMV8
1215 char szTmp[128];
1216 if (fFlags & IEMR3INFOTLB_F_CHECK)
1217 {
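        /* Even slots are expected to map non-global pages (G clear) and odd slots global ones; fInvSlotG carries that expectation for the inverted-bit comparisons below. */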
1218 uint32_t const fInvSlotG = (uint32_t)!(uSlot & 1) << X86_PTE_BIT_G;
1219 PGMPTWALKFAST WalkFast;
1220 int rc = PGMGstQueryPageFast(pVCpu, GCPtr, 0 /*fFlags - don't check or modify anything */, &WalkFast);
1221 pszValid = szTmp;
1222 if (RT_FAILURE(rc))
1223 switch (rc)
1224 {
1225 case VERR_PAGE_TABLE_NOT_PRESENT:
1226 switch ((WalkFast.fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT)
1227 {
1228 case 1: pszValid = " stale(page-not-present)"; break;
1229 case 2: pszValid = " stale(pd-entry-not-present)"; break;
1230 case 3: pszValid = " stale(pdptr-entry-not-present)"; break;
1231 case 4: pszValid = " stale(pml4-entry-not-present)"; break;
1232 case 5: pszValid = " stale(pml5-entry-not-present)"; break;
1233 default: pszValid = " stale(VERR_PAGE_TABLE_NOT_PRESENT)"; break;
1234 }
1235 break;
1236 default: RTStrPrintf(szTmp, sizeof(szTmp), " stale(rc=%d)", rc); break;
1237 }
1238 else if (WalkFast.GCPhys != pTlbe->GCPhys)
1239 RTStrPrintf(szTmp, sizeof(szTmp), " stale(GCPhys=%RGp)", WalkFast.GCPhys);
1240 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G | X86_PTE_A | X86_PTE_D))
1241 == ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1242 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED))
1243 | fInvSlotG ) )
1244 pszValid = " still-valid";
1245 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G))
1246 == ((pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER)) | fInvSlotG) )
1247 switch ( (~WalkFast.fEffective & (X86_PTE_A | X86_PTE_D))
1248 ^ (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED)) )
1249 {
1250 case X86_PTE_A:
1251 pszValid = WalkFast.fEffective & X86_PTE_A ? " still-valid(accessed-now)" : " still-valid(accessed-no-more)";
1252 break;
1253 case X86_PTE_D:
1254 pszValid = WalkFast.fEffective & X86_PTE_D ? " still-valid(dirty-now)" : " still-valid(dirty-no-more)";
1255 break;
1256 case X86_PTE_D | X86_PTE_A:
1257 RTStrPrintf(szTmp, sizeof(szTmp), " still-valid(%s%s)",
1258 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1259 : WalkFast.fEffective & X86_PTE_D ? "dirty-now" : "dirty-no-more",
1260 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1261 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1262 break;
1263 default: AssertFailed(); break;
1264 }
1265 else
1266 RTStrPrintf(szTmp, sizeof(szTmp), " stale(%s%s%s%s%s)",
1267 (~WalkFast.fEffective & X86_PTE_RW) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) ? ""
1268 : WalkFast.fEffective & X86_PTE_RW ? "writeable-now" : "writable-no-more",
1269 (~WalkFast.fEffective & X86_PTE_US) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) ? ""
1270 : WalkFast.fEffective & X86_PTE_US ? " user-now" : " user-no-more",
1271 (~WalkFast.fEffective & X86_PTE_G) == fInvSlotG ? ""
1272 : WalkFast.fEffective & X86_PTE_G ? " global-now" : " global-no-more",
1273 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1274 : WalkFast.fEffective & X86_PTE_D ? " dirty-now" : " dirty-no-more",
1275 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1276 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1277 }
1278#else
1279 RT_NOREF(pVCpu);
1280#endif
1281
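    /* Columns: slot, validity, guest address -> guest physical / ring-3 mapping / raw flags, PT attributes, physical page attributes, mapping & physical revision state, optional check verdict. */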
1282 pHlp->pfnPrintf(pHlp, IEMTLB_SLOT_FMT ": %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s%s%s%s/%s%s%s%s/%s %s%s\n",
1283 uSlot,
1284 (pTlbe->uTag & IEMTLB_REVISION_MASK) == uTlbRevision ? "valid "
1285 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
1286 : "expired",
1287 GCPtr, /* -> */
1288 pTlbe->GCPhys, /* / */ pTlbe->pbMappingR3,
1289 /* / */
1290 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
1291 /* */
1292 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "R-" : "RW",
1293 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "-" : "X",
1294 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
1295 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
1296 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER ? "U" : "S",
1297 !(uSlot & 1) ? "-" : "G",
1298                    pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE ? "2M" : "4K",
1299 /* / */
1300 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
1301 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
1302 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "u" : "-",
1303 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_CODE_PAGE ? "c" : "-",
1304 /* / */
1305 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "N" : "M",
1306 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
1307 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired",
1308 pszValid);
1309}
1310
1311
1312/** Displays one or more TLB slots. */
1313static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1314 uint32_t uSlot, uint32_t cSlots, uint32_t fFlags, bool *pfHeader)
1315{
1316 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
1317 {
1318 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
1319 {
1320 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
1321 cSlots, RT_ELEMENTS(pTlb->aEntries));
1322 cSlots = RT_ELEMENTS(pTlb->aEntries);
1323 }
1324
1325 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1326 while (cSlots-- > 0)
1327 {
1328 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
1329 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &Tlbe, uSlot, fFlags);
1330 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
1331 }
1332 }
1333 else
1334 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
1335 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
1336}
1337
1338
1339/** Displays the TLB slot for the given address. */
1340static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1341 uint64_t uAddress, uint32_t fFlags, bool *pfHeader)
1342{
1343 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1344
1345 uint64_t const uTag = IEMTLB_CALC_TAG_NO_REV(uAddress);
1346#ifdef IEMTLB_TAG_TO_EVEN_INDEX
1347 uint32_t const uSlot = IEMTLB_TAG_TO_EVEN_INDEX(uTag);
1348#else
1349 uint32_t const uSlot = IEMTLB_TAG_TO_INDEX(uTag);
1350#endif
1351 IEMTLBENTRY const TlbeL = pTlb->aEntries[uSlot];
1352#ifndef VBOX_VMM_TARGET_ARMV8
1353 IEMTLBENTRY const TlbeG = pTlb->aEntries[uSlot + 1];
1354#endif
1355 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
1356 TlbeL.uTag == (uTag | pTlb->uTlbRevision) ? "match"
1357 : (TlbeL.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1358 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeL, uSlot, fFlags);
1359
1360#ifndef VBOX_VMM_TARGET_ARMV8
1361 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot + 1,
1362 TlbeG.uTag == (uTag | pTlb->uTlbRevisionGlobal) ? "match"
1363 : (TlbeG.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1364 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeG, uSlot + 1, fFlags);
1365#endif
1366}
1367
1368
1369/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
1370static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
1371{
1372 /*
1373 * This is entirely argument driven.
1374 */
1375 static RTGETOPTDEF const s_aOptions[] =
1376 {
1377 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1378 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1379 { "--check", 'C', RTGETOPT_REQ_NOTHING },
1380 { "all", 'A', RTGETOPT_REQ_NOTHING },
1381 { "--all", 'A', RTGETOPT_REQ_NOTHING },
1382 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1383 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
1384 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1385 { "--only-valid", 'v', RTGETOPT_REQ_NOTHING },
1386 };
1387
1388 RTGETOPTSTATE State;
1389 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1390 AssertRCReturnVoid(rc);
1391
1392 uint32_t cActionArgs = 0;
1393 bool fNeedHeader = true;
1394 bool fAddressMode = true;
1395 uint32_t fFlags = 0;
1396 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1397 PVMCPU pVCpu = pVCpuCall;
1398 if (!pVCpu)
1399 pVCpu = VMMGetCpuById(pVM, 0);
1400
1401 RTGETOPTUNION ValueUnion;
1402 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1403 {
1404 switch (rc)
1405 {
1406 case 'c':
1407 if (ValueUnion.u32 >= pVM->cCpus)
1408 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1409 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1410 {
1411 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1412 fNeedHeader = true;
1413 if (!pVCpuCall || pVCpuCall->idCpu != ValueUnion.u32)
1414 {
1415                        pHlp->pfnPrintf(pHlp, "info: Can't check guest PTs when switching to a different VCpu! Targeting %u, on %u.\n",
1416 ValueUnion.u32, pVCpuCall->idCpu);
1417 fFlags &= ~IEMR3INFOTLB_F_CHECK;
1418 }
1419 }
1420 break;
1421
1422 case 'C':
1423 if (!pVCpuCall)
1424 pHlp->pfnPrintf(pHlp, "error: Can't check guest PT when not running on an EMT!\n");
1425 else if (pVCpu != pVCpuCall)
1426                    pHlp->pfnPrintf(pHlp, "error: Can't check guest PTs when on a different EMT! Targeting %u, on %u.\n",
1427 pVCpu->idCpu, pVCpuCall->idCpu);
1428 else
1429 fFlags |= IEMR3INFOTLB_F_CHECK;
1430 break;
1431
1432 case 'a':
1433 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1434 ValueUnion.u64, fFlags, &fNeedHeader);
1435 fAddressMode = true;
1436 cActionArgs++;
1437 break;
1438
1439 case 'A':
1440 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1441 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1442 cActionArgs++;
1443 break;
1444
1445 case 'r':
1446 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1447 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, fFlags, &fNeedHeader);
1448 fAddressMode = false;
1449 cActionArgs++;
1450 break;
1451
1452 case 's':
1453 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1454 ValueUnion.u32, 1, fFlags, &fNeedHeader);
1455 fAddressMode = false;
1456 cActionArgs++;
1457 break;
1458
1459 case 'v':
1460 fFlags |= IEMR3INFOTLB_F_ONLY_VALID;
1461 break;
1462
1463 case VINF_GETOPT_NOT_OPTION:
1464 if (fAddressMode)
1465 {
1466 uint64_t uAddr;
1467 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
1468 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1469 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1470 uAddr, fFlags, &fNeedHeader);
1471 else
1472 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
1473 }
1474 else
1475 {
1476 uint32_t uSlot;
1477 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
1478 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1479 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1480 uSlot, 1, fFlags, &fNeedHeader);
1481 else
1482 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
1483 }
1484 cActionArgs++;
1485 break;
1486
1487 case 'h':
1488 pHlp->pfnPrintf(pHlp,
1489 "Usage: info %ctlb [options]\n"
1490 "\n"
1491 "Options:\n"
1492 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1493                            "    Selects the CPU whose TLBs we're looking at. Default: Caller / 0\n"
1494 " -C,--check\n"
1495 " Check valid entries against guest PTs.\n"
1496 " -A, --all, all\n"
1497 " Display all the TLB entries (default if no other args).\n"
1498 " -a<virt>, --address=<virt>\n"
1499 " Shows the TLB entry for the specified guest virtual address.\n"
1500 " -r<slot:count>, --range=<slot:count>\n"
1501 " Shows the TLB entries for the specified slot range.\n"
1502 " -s<slot>,--slot=<slot>\n"
1503 " Shows the given TLB slot.\n"
1504 " -v,--only-valid\n"
1505 " Only show valid TLB entries (TAG, not phys)\n"
1506 "\n"
1507 "Non-options are interpreted according to the last -a, -r or -s option,\n"
1508                            "defaulting to addresses if not preceded by any of those options.\n"
1509 , fITlb ? 'i' : 'd');
1510 return;
1511
1512 default:
1513 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1514 return;
1515 }
1516 }
1517
1518 /*
1519 * If no action taken, we display all (-A) by default.
1520 */
1521 if (!cActionArgs)
1522 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1523 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1524}
1525
1526
1527/**
1528 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
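 *
 * For example, 'info itlb --check --all' dumps all instruction TLB entries for the calling CPU and cross-checks them against the guest page tables.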
1529 */
1530static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1531{
1532 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
1533}
1534
1535
1536/**
1537 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
1538 */
1539static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1540{
1541 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
1542}
1543
1544
1545#ifdef IEM_WITH_TLB_TRACE
1546/**
1547 * @callback_method_impl{FNDBGFINFOARGVINT, tlbtrace}
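 *
 * For example, 'info tlbtrace --resolve-rip 100' shows the last 100 trace entries with the flattened RIPs resolved to symbols.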
1548 */
1549static DECLCALLBACK(void) iemR3InfoTlbTrace(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1550{
1551 /*
1552 * Parse arguments.
1553 */
1554 static RTGETOPTDEF const s_aOptions[] =
1555 {
1556 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1557 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1558 { "--last", 'l', RTGETOPT_REQ_UINT32 },
1559 { "--limit", 'l', RTGETOPT_REQ_UINT32 },
1560 { "--stop-at-global-flush", 'g', RTGETOPT_REQ_NOTHING },
1561 { "--resolve-rip", 'r', RTGETOPT_REQ_NOTHING },
1562 };
1563
1564 RTGETOPTSTATE State;
1565 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1566 AssertRCReturnVoid(rc);
1567
1568 uint32_t cLimit = UINT32_MAX;
1569 bool fStopAtGlobalFlush = false;
1570 bool fResolveRip = false;
1571 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1572 PVMCPU pVCpu = pVCpuCall;
1573 if (!pVCpu)
1574 pVCpu = VMMGetCpuById(pVM, 0);
1575
1576 RTGETOPTUNION ValueUnion;
1577 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1578 {
1579 switch (rc)
1580 {
1581 case 'c':
1582 if (ValueUnion.u32 >= pVM->cCpus)
1583 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1584 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1585 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1586 break;
1587
1588 case 'l':
1589 cLimit = ValueUnion.u32;
1590 break;
1591
1592 case 'g':
1593 fStopAtGlobalFlush = true;
1594 break;
1595
1596 case 'r':
1597 fResolveRip = true;
1598 break;
1599
1600 case 'h':
1601 pHlp->pfnPrintf(pHlp,
1602 "Usage: info tlbtrace [options] [n]\n"
1603 "\n"
1604 "Options:\n"
1605 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1606                            "    Selects the CPU whose TLB trace we're looking at. Default: Caller / 0\n"
1607 " [n], -l<n>, --last=<n>\n"
1608 " Limit display to the last N entries. Default: all\n"
1609 " -g, --stop-at-global-flush\n"
1610 " Stop after the first global flush entry.\n"
1611 " -r, --resolve-rip\n"
1612 " Resolve symbols for the flattened RIP addresses.\n"
1613 );
1614 return;
1615
1616 case VINF_GETOPT_NOT_OPTION:
1617 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cLimit);
1618 if (RT_SUCCESS(rc))
1619 break;
1620 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
1621 return;
1622
1623 default:
1624 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1625 return;
1626 }
1627 }
1628
1629 /*
1630 * Get the details.
1631 */
1632 AssertReturnVoid(pVCpu);
1633 Assert(pVCpu->iem.s.cTlbTraceEntriesShift <= 28);
1634 uint32_t idx = pVCpu->iem.s.idxTlbTraceEntry;
1635 uint32_t const cShift = RT_MIN(pVCpu->iem.s.cTlbTraceEntriesShift, 28);
1636 uint32_t const fMask = RT_BIT_32(cShift) - 1;
1637 uint32_t cLeft = RT_MIN(RT_MIN(idx, RT_BIT_32(cShift)), cLimit);
1638 PCIEMTLBTRACEENTRY paEntries = pVCpu->iem.s.paTlbTraceEntries;
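    /* The trace log is a ring buffer of 2^cShift entries with idx as the next write position, so entries are walked backwards from the most recent one. */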
1639 if (cLeft && paEntries)
1640 {
1641 /*
1642 * Display the entries.
1643 */
1644 pHlp->pfnPrintf(pHlp, "TLB Trace for CPU %u:\n", pVCpu->idCpu);
1645 while (cLeft-- > 0)
1646 {
1647 PCIEMTLBTRACEENTRY const pCur = &paEntries[--idx & fMask];
1648 const char *pszSymbol = "";
1649 union
1650 {
1651 RTDBGSYMBOL Symbol;
1652 char ach[sizeof(RTDBGSYMBOL) + 32];
1653 } uBuf;
1654 if (fResolveRip)
1655 {
1656 RTGCINTPTR offDisp = 0;
1657 DBGFADDRESS Addr;
1658 rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, pCur->rip),
1659 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL
1660 | RTDBGSYMADDR_FLAGS_SKIP_ABS
1661 | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1662 &offDisp, &uBuf.Symbol, NULL);
1663 if (RT_SUCCESS(rc))
1664 {
1665 /* Add displacement. */
1666 if (offDisp)
1667 {
1668 size_t const cchName = strlen(uBuf.Symbol.szName);
1669 char * const pszEndName = &uBuf.Symbol.szName[cchName];
1670 size_t const cbLeft = sizeof(uBuf) - sizeof(uBuf.Symbol) + sizeof(uBuf.Symbol.szName) - cchName;
1671 if (offDisp > 0)
1672 RTStrPrintf(pszEndName, cbLeft, "+%#1RGv", offDisp);
1673 else
1674 RTStrPrintf(pszEndName, cbLeft, "-%#1RGv", -offDisp);
1675 }
1676
1677 /* Put a space before it. */
1678 AssertCompile(RTASSERT_OFFSET_OF(RTDBGSYMBOL, szName) > 0);
1679 char *pszName = uBuf.Symbol.szName;
1680 *--pszName = ' ';
1681 pszSymbol = pszName;
1682 }
1683 }
1684 static const char *s_apszTlbType[2] = { "code", "data" };
1685 static const char *s_apszScanType[4] = { "skipped", "global", "non-global", "both" };
1686 switch (pCur->enmType)
1687 {
1688 case kIemTlbTraceType_InvlPg:
1689 pHlp->pfnPrintf(pHlp, "%u: %016RX64 invlpg %RGv slot=" IEMTLB_SLOT_FMT "%s\n", idx, pCur->rip,
1690 pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param), pszSymbol);
1691 break;
1692 case kIemTlbTraceType_EvictSlot:
1693 pHlp->pfnPrintf(pHlp, "%u: %016RX64 evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1694 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1695 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1696 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1697 pCur->u64Param2, pszSymbol);
1698 break;
1699 case kIemTlbTraceType_LargeEvictSlot:
1700 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1701 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1702 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1703 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1704 pCur->u64Param2, pszSymbol);
1705 break;
1706 case kIemTlbTraceType_LargeScan:
1707 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large scan %s %s%s\n", idx, pCur->rip, s_apszTlbType[pCur->bParam & 1],
1708 s_apszScanType[pCur->u32Param & 3], pszSymbol);
1709 break;
1710
1711 case kIemTlbTraceType_Flush:
1712 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64%s\n", idx, pCur->rip,
1713 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pszSymbol);
1714 break;
1715 case kIemTlbTraceType_FlushGlobal:
1716 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64 grev=%#RX64%s\n", idx, pCur->rip,
1717 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pCur->u64Param2, pszSymbol);
1718 if (fStopAtGlobalFlush)
1719 return;
1720 break;
1721 case kIemTlbTraceType_Load:
1722 case kIemTlbTraceType_LoadGlobal:
1723 pHlp->pfnPrintf(pHlp, "%u: %016RX64 %cload %s %RGv slot=" IEMTLB_SLOT_FMT " gcphys=%RGp fTlb=%#RX32%s\n",
1724 idx, pCur->rip,
1725 pCur->enmType == kIemTlbTraceType_LoadGlobal ? 'g' : 'l', s_apszTlbType[pCur->bParam & 1],
1726 pCur->u64Param,
1727 (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param)
1728 | (pCur->enmType == kIemTlbTraceType_LoadGlobal),
1729 (RTGCPTR)pCur->u64Param2, pCur->u32Param, pszSymbol);
1730 break;
1731
1732 case kIemTlbTraceType_Load_Cr0:
1733 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr0 %08RX64 (was %08RX64)%s\n",
1734 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1735 break;
1736 case kIemTlbTraceType_Load_Cr3:
1737 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr3 %016RX64 (was %016RX64)%s\n",
1738 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1739 break;
1740 case kIemTlbTraceType_Load_Cr4:
1741 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr4 %08RX64 (was %08RX64)%s\n",
1742 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1743 break;
1744 case kIemTlbTraceType_Load_Efer:
1745 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load efer %016RX64 (was %016RX64)%s\n",
1746 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1747 break;
1748
1749 case kIemTlbTraceType_Irq:
1750 pHlp->pfnPrintf(pHlp, "%u: %016RX64 irq %#04x flags=%#x eflboth=%#RX64%s\n",
1751 idx, pCur->rip, pCur->bParam, pCur->u32Param,
1752 pCur->u64Param & ((RT_BIT_64(CPUMX86EFLAGS_HW_BITS) - 1) | CPUMX86EFLAGS_INT_MASK_64),
1753 pszSymbol);
1754 break;
1755 case kIemTlbTraceType_Xcpt:
1756 if (pCur->u32Param & IEM_XCPT_FLAGS_CR2)
1757 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x cr2=%RX64%s\n",
1758 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pCur->u64Param2, pszSymbol);
1759 else if (pCur->u32Param & IEM_XCPT_FLAGS_ERR)
1760 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x%s\n",
1761 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pszSymbol);
1762 else
1763 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x%s\n",
1764 idx, pCur->rip, pCur->bParam, pCur->u32Param, pszSymbol);
1765 break;
1766 case kIemTlbTraceType_IRet:
1767 pHlp->pfnPrintf(pHlp, "%u: %016RX64 iret cs:rip=%04x:%016RX64 efl=%08RX32%s\n",
1768 idx, pCur->rip, pCur->u32Param, pCur->u64Param, (uint32_t)pCur->u64Param2, pszSymbol);
1769 break;
1770
1771 case kIemTlbTraceType_Tb_Compile:
1772 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb comp GCPhysPc=%012RX64%s\n",
1773 idx, pCur->rip, pCur->u64Param, pszSymbol);
1774 break;
1775 case kIemTlbTraceType_Tb_Exec_Threaded:
1776 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb thrd GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1777 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1778 break;
1779 case kIemTlbTraceType_Tb_Exec_Native:
1780 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb n8ve GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1781 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1782 break;
1783
1784 case kIemTlbTraceType_User0:
1785 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user0 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1786 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1787 break;
1788 case kIemTlbTraceType_User1:
1789 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user1 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1790 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1791 break;
1792 case kIemTlbTraceType_User2:
1793 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user2 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1794 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1795 break;
1796 case kIemTlbTraceType_User3:
1797 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user3 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1798 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1799 break;
1800
1801 case kIemTlbTraceType_Invalid:
1802                    pHlp->pfnPrintf(pHlp, "%u: Invalid!\n", idx);
1803 break;
1804 }
1805 }
1806 }
1807 else
1808 pHlp->pfnPrintf(pHlp, "No trace entries to display\n");
1809}
1810#endif /* IEM_WITH_TLB_TRACE */
1811
1812#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1813
1814/**
1815 * Gets the compile time flat PC for the TB.
1816 */
1817DECL_FORCE_INLINE(RTGCPTR) iemR3GetTbFlatPc(PCIEMTB pTb)
1818{
1819#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
1820 if (pTb->fFlags & IEMTB_F_TYPE_NATIVE)
1821 {
1822 PCIEMTBDBG const pDbgInfo = pTb->pDbgInfo;
1823 return pDbgInfo ? pDbgInfo->FlatPc : RTGCPTR_MAX;
1824 }
1825#endif
1826 return pTb->FlatPc;
1827}
1828
1829
1830/**
1831 * @callback_method_impl{FNDBGFINFOARGVINT, tb}
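 *
 * For example, 'info tb' looks up the TB at the current CS:RIP, while 'info tb --tb-id=42' shows a TB by the number reported by tbtop (42 being just an example ID).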
1832 */
1833static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1834{
1835 /*
1836 * Parse arguments.
1837 */
1838 static RTGETOPTDEF const s_aOptions[] =
1839 {
1840 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1841 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1842 { "--addr", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1843 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1844 { "--phys", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1845 { "--physical", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1846 { "--phys-addr", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1847 { "--phys-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1848 { "--physical-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1849 { "--flags", 'f', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1850 { "--tb", 't', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1851 { "--tb-id", 't', RTGETOPT_REQ_UINT32 },
1852 };
1853
1854 RTGETOPTSTATE State;
1855 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1856 AssertRCReturnVoid(rc);
1857
1858 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
1859 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
1860 RTGCPHYS GCPhysPc = NIL_RTGCPHYS;
1861 RTGCPHYS GCVirt = NIL_RTGCPTR;
1862 uint32_t fFlags = UINT32_MAX;
1863 uint32_t idTb = UINT32_MAX;
1864
1865 RTGETOPTUNION ValueUnion;
1866 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1867 {
1868 switch (rc)
1869 {
1870 case 'c':
1871 if (ValueUnion.u32 >= pVM->cCpus)
1872 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1873 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1874 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1875 break;
1876
1877 case 'a':
1878 GCVirt = ValueUnion.u64;
1879 GCPhysPc = NIL_RTGCPHYS;
1880 idTb = UINT32_MAX;
1881 break;
1882
1883 case 'p':
1884 GCVirt = NIL_RTGCPHYS;
1885 GCPhysPc = ValueUnion.u64;
1886 idTb = UINT32_MAX;
1887 break;
1888
1889 case 'f':
1890 fFlags = ValueUnion.u32;
1891 break;
1892
1893 case 't':
1894 GCVirt = NIL_RTGCPHYS;
1895 GCPhysPc = NIL_RTGCPHYS;
1896 idTb = ValueUnion.u32;
1897 break;
1898
1899 case VINF_GETOPT_NOT_OPTION:
1900 {
1901 if ( (ValueUnion.psz[0] == 'T' || ValueUnion.psz[0] == 't')
1902 && (ValueUnion.psz[1] == 'B' || ValueUnion.psz[1] == 'b')
1903 && ValueUnion.psz[2] == '#')
1904 {
1905 rc = RTStrToUInt32Full(&ValueUnion.psz[3], 0, &idTb);
1906 if (RT_SUCCESS(rc))
1907 {
1908 GCVirt = NIL_RTGCPHYS;
1909 GCPhysPc = NIL_RTGCPHYS;
1910 break;
1911 }
1912                    pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to TB ID: %Rrc\n", ValueUnion.psz, rc);
1913 }
1914 else
1915 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1916 return;
1917 }
1918
1919 case 'h':
1920 pHlp->pfnPrintf(pHlp,
1921 "Usage: info tb [options]\n"
1922 "\n"
1923 "Options:\n"
1924 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1925                            "    Selects the CPU whose TBs we're looking at. Default: Caller / 0\n"
1926 " -a<virt>, --address=<virt>\n"
1927 " Shows the TB for the specified guest virtual address.\n"
1928 " -p<phys>, --phys=<phys>, --phys-addr=<phys>\n"
1929 " Shows the TB for the specified guest physical address.\n"
1930                            "  -t<id>, --tb=<id>, --tb-id=<id>, TB#<id>\n"
1931 " Show the TB specified by the identifier/number (from tbtop).\n"
1932 " -f<flags>,--flags=<flags>\n"
1933 " The TB flags value (hex) to use when looking up the TB.\n"
1934 "\n"
1935 "The default is to use CS:RIP and derive flags from the CPU mode.\n");
1936 return;
1937
1938 default:
1939 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1940 return;
1941 }
1942 }
1943
1944 /* Currently, only do work on the same EMT. */
1945 if (pVCpu != pVCpuThis)
1946 {
1947 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
1948 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
1949 return;
1950 }
1951
1952 /*
1953 * Defaults.
1954 */
1955 if (GCPhysPc == NIL_RTGCPHYS && idTb == UINT32_MAX)
1956 {
1957 if (GCVirt == NIL_RTGCPTR)
1958 GCVirt = CPUMGetGuestFlatPC(pVCpu);
1959 rc = PGMPhysGCPtr2GCPhys(pVCpu, GCVirt, &GCPhysPc);
1960 if (RT_FAILURE(rc))
1961 {
1962            pHlp->pfnPrintf(pHlp, "Failed to convert %%%RGv to a guest physical address: %Rrc\n", GCVirt, rc);
1963 return;
1964 }
1965 }
1966 if (fFlags == UINT32_MAX && idTb == UINT32_MAX)
1967 {
1968 /* Note! This is duplicating code in IEMAllThrdRecompiler. */
1969 fFlags = iemCalcExecFlags(pVCpu);
1970 if (pVM->cCpus == 1)
1971 fFlags |= IEM_F_X86_DISREGARD_LOCK;
1972 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
1973 fFlags |= IEMTB_F_INHIBIT_SHADOW;
1974 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1975 fFlags |= IEMTB_F_INHIBIT_NMI;
1976 if ((IEM_F_MODE_CPUMODE_MASK & fFlags) != IEMMODE_64BIT)
1977 {
1978 int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
1979 if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
1980 fFlags |= IEMTB_F_CS_LIM_CHECKS;
1981 }
1982 }
1983
1984 PCIEMTB pTb;
1985 if (idTb == UINT32_MAX)
1986 {
1987 /*
1988 * Do the lookup...
1989 *
1990 * Note! This is also duplicating code in IEMAllThrdRecompiler. We don't
1991 * have much choice since we don't want to increase use counters and
1992 * trigger native recompilation.
1993 */
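/* Walk the hash chain for (fFlags, GCPhysPc) and take the first TB whose
   physical PC and key flags match; unlike the real lookup path this walk
   leaves cUsed/msLastUsed untouched. */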
1994 fFlags &= IEMTB_F_KEY_MASK;
1995 IEMTBCACHE const * const pTbCache = pVCpu->iem.s.pTbCacheR3;
1996 uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
1997 pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
1998 while (pTb)
1999 {
2000 if (pTb->GCPhysPc == GCPhysPc)
2001 {
2002 if ((pTb->fFlags & IEMTB_F_KEY_MASK) == fFlags)
2003 {
2004 /// @todo if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
2005 break;
2006 }
2007 }
2008 pTb = pTb->pNext;
2009 }
2010 if (!pTb)
2011 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x - no TB found on #%u\n", GCPhysPc, fFlags, pVCpu->idCpu);
2012 }
2013 else
2014 {
2015 /*
2016 * Use the TB ID for indexing.
2017 */
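/* TB IDs are linear indices over the allocator's chunks:
   idTb = idxChunk * cTbsPerChunk + idxWithinChunk, the same TB# numbering
   used by 'info tbtop' and by the output below. */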
2018 pTb = NULL;
2019 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2020 if (pTbAllocator)
2021 {
2022 size_t const idxTbChunk = idTb / pTbAllocator->cTbsPerChunk;
2023 size_t const idxTbInChunk = idTb % pTbAllocator->cTbsPerChunk;
2024 if (idxTbChunk < pTbAllocator->cAllocatedChunks)
2025 pTb = &pTbAllocator->aChunks[idxTbChunk].paTbs[idxTbInChunk];
2026 else
2027 pHlp->pfnPrintf(pHlp, "Invalid TB ID: %u (%#x)\n", idTb, idTb);
2028 }
2029 }
2030
2031 if (pTb)
2032 {
2033 /*
2034 * Disassemble according to type.
2035 */
2036 size_t const idxTbChunk = pTb->idxAllocChunk;
2037 size_t const idxTbNo = (pTb - &pVCpu->iem.s.pTbAllocatorR3->aChunks[idxTbChunk].paTbs[0])
2038 + idxTbChunk * pVCpu->iem.s.pTbAllocatorR3->cTbsPerChunk;
2039 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2040 {
2041# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2042 case IEMTB_F_TYPE_NATIVE:
2043 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - native\n",
2044 GCPhysPc, iemR3GetTbFlatPc(pTb), fFlags, pVCpu->idCpu, idxTbNo, pTb);
2045 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2046 break;
2047# endif
2048
2049 case IEMTB_F_TYPE_THREADED:
2050 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - threaded\n",
2051 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb);
2052 iemThreadedDisassembleTb(pTb, pHlp);
2053 break;
2054
2055 default:
2056 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - ??? %#x\n",
2057 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb, pTb->fFlags);
2058 break;
2059 }
2060 }
2061}
2062
2063
2064/**
2065 * @callback_method_impl{FNDBGFINFOARGVINT, tbtop}
2066 */
2067static DECLCALLBACK(void) iemR3InfoTbTop(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
2068{
2069 /*
2070 * Parse arguments.
2071 */
2072 static RTGETOPTDEF const s_aOptions[] =
2073 {
2074 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
2075 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
2076 { "--dis", 'd', RTGETOPT_REQ_NOTHING },
2077 { "--disas", 'd', RTGETOPT_REQ_NOTHING },
2078 { "--disasm", 'd', RTGETOPT_REQ_NOTHING },
2079 { "--disassemble", 'd', RTGETOPT_REQ_NOTHING },
2080 { "--no-dis", 'D', RTGETOPT_REQ_NOTHING },
2081 { "--no-disas", 'D', RTGETOPT_REQ_NOTHING },
2082 { "--no-disasm", 'D', RTGETOPT_REQ_NOTHING },
2083 { "--no-disassemble", 'D', RTGETOPT_REQ_NOTHING },
2084 { "--most-freq", 'f', RTGETOPT_REQ_NOTHING },
2085 { "--most-frequent", 'f', RTGETOPT_REQ_NOTHING },
2086 { "--most-frequently", 'f', RTGETOPT_REQ_NOTHING },
2087 { "--most-frequently-used", 'f', RTGETOPT_REQ_NOTHING },
2088 { "--most-recent", 'r', RTGETOPT_REQ_NOTHING },
2089 { "--most-recently", 'r', RTGETOPT_REQ_NOTHING },
2090 { "--most-recently-used", 'r', RTGETOPT_REQ_NOTHING },
2091 { "--count", 'n', RTGETOPT_REQ_UINT32 },
2092 };
2093
2094 RTGETOPTSTATE State;
2095 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
2096 AssertRCReturnVoid(rc);
2097
2098 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
2099 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
2100 enum { kTbTop_MostFrequentlyUsed, kTbTop_MostRecentlyUsed }
2101 enmTop = kTbTop_MostFrequentlyUsed;
2102 bool fDisassemble = false;
2103 uint32_t const cTopDefault = 64;
2104 uint32_t const cTopMin = 1;
2105 uint32_t const cTopMax = 1024;
2106 uint32_t cTop = cTopDefault;
2107
2108 RTGETOPTUNION ValueUnion;
2109 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
2110 {
2111 switch (rc)
2112 {
2113 case 'c':
2114 if (ValueUnion.u32 >= pVM->cCpus)
2115 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
2116 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
2117 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
2118 break;
2119
2120 case 'd':
2121 fDisassemble = true;
2122 break;
2123
2124 case 'D':
2125 fDisassemble = false;
2126 break;
2127
2128 case 'f':
2129 enmTop = kTbTop_MostFrequentlyUsed;
2130 break;
2131
2132 case 'r':
2133 enmTop = kTbTop_MostRecentlyUsed;
2134 break;
2135
2136 case VINF_GETOPT_NOT_OPTION:
2137 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cTop);
2138 if (RT_FAILURE(rc))
2139 {
2140 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
2141 return;
2142 }
2143 ValueUnion.u32 = cTop;
2144 RT_FALL_THROUGH();
2145 case 'n':
2146 if (!ValueUnion.u32)
2147 cTop = cTopDefault;
2148 else
2149 {
2150 cTop = RT_MAX(RT_MIN(ValueUnion.u32, cTopMax), cTopMin);
2151 if (cTop != ValueUnion.u32)
2152 pHlp->pfnPrintf(pHlp, "warning: adjusted %u to %u (valid range: [%u..%u], 0 for default (%d))\n",
2153 ValueUnion.u32, cTop, cTopMin, cTopMax, cTopDefault);
2154 }
2155 break;
2156
2157 case 'h':
2158 pHlp->pfnPrintf(pHlp,
2159 "Usage: info tbtop [options]\n"
2160 "\n"
2161 "Options:\n"
2162 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
2163 " Selects the CPU whose TBs we're looking at. Default: Caller / 0\n"
2164 " -d, --dis[as[m]], --disassemble\n"
2165 " Show full TB disassembly.\n"
2166 " -D, --no-dis[as[m]], --no-disassemble\n"
2167 " Do not show TB disassembly. The default.\n"
2168 " -f, --most-freq[ent[ly[-used]]]\n"
2169 " Shows the most frequently used TBs (IEMTB::cUsed). The default.\n"
2170 " -r, --most-recent[ly[-used]]\n"
2171 " Shows the most recently used TBs (IEMTB::msLastUsed).\n"
2172 " -n<num>, --count=<num>\n"
2173 " The number of TBs to display. Default: %u\n"
2174 " Non-option arguments are also taken as this value.\n"
2175 , cTopDefault);
2176 return;
2177
2178 default:
2179 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
2180 return;
2181 }
2182 }
2183
2184 /* Currently, only do work on the same EMT. */
2185 if (pVCpu != pVCpuThis)
2186 {
2187 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
2188 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
2189 return;
2190 }
2191
2192 /*
2193 * Collect the data by scanning the TB allocation map.
2194 */
2195 struct IEMTBTOPENTRY
2196 {
2197 /** Pointer to the translation block. */
2198 PCIEMTB pTb;
2199 /** The sorting key. */
2200 uint64_t uSortKey;
2201 } aTop[cTopMax] = { { NULL, 0 }, };
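/* aTop is a fixed-size top-N table kept sorted by uSortKey in descending
   order; the scan below inserts candidates and, once cTop entries have been
   collected, drops the weakest entry for each new insertion. */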
2202 uint32_t cValid = 0;
2203 PIEMTBALLOCATOR pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2204 if (pTbAllocator)
2205 {
2206 uint32_t const cTbsPerChunk = pTbAllocator->cTbsPerChunk;
2207 for (uint32_t iChunk = 0; iChunk < pTbAllocator->cAllocatedChunks; iChunk++)
2208 {
2209 for (uint32_t iTb = 0; iTb < cTbsPerChunk; iTb++)
2210 {
2211 PCIEMTB const pTb = &pTbAllocator->aChunks[iChunk].paTbs[iTb];
2212 AssertContinue(pTb);
2213 if (pTb->fFlags & IEMTB_F_TYPE_MASK)
2214 {
2215 /* Extract and compose the sort key. */
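/* RT_MAKE_U64(Lo, Hi) places the primary criterion in the high dword, so a
   single unsigned 64-bit compare orders by it first and uses the other
   field as the tie breaker. */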
2216 uint64_t const uSortKey = enmTop == kTbTop_MostFrequentlyUsed
2217 ? RT_MAKE_U64(pTb->msLastUsed, pTb->cUsed)
2218 : RT_MAKE_U64(pTb->cUsed, pTb->msLastUsed);
2219
2220 /*
2221 * Discard the key if it's smaller than the smallest in the table when it is full.
2222 */
2223 if ( cValid >= cTop
2224 && uSortKey <= aTop[cTop - 1].uSortKey)
2225 { /* discard it */ }
2226 else
2227 {
2228 /*
2229 * Do binary search to find the insert location
2230 */
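/* The search operates on the descending aTop ordering; equal keys are
   skipped past so new entries land after existing ones with the same key. */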
2231 uint32_t idx;
2232 if (cValid > 0)
2233 {
2234 uint32_t idxEnd = cValid;
2235 uint32_t idxStart = 0;
2236 idx = cValid / 2;
2237 for (;;)
2238 {
2239 if (uSortKey > aTop[idx].uSortKey)
2240 {
2241 if (idx > idxStart)
2242 idxEnd = idx;
2243 else
2244 break;
2245 }
2246 else if (uSortKey < aTop[idx].uSortKey)
2247 {
2248 idx += 1;
2249 if (idx < idxEnd)
2250 idxStart = idx;
2251 else
2252 break;
2253 }
2254 else
2255 {
2256 do
2257 idx++;
2258 while (idx < cValid && uSortKey == aTop[idx].uSortKey);
2259 break;
2260 }
2261 idx = idxStart + (idxEnd - idxStart) / 2;
2262 }
2263 AssertContinue(idx < RT_ELEMENTS(aTop));
2264
2265 /*
2266 * Shift entries as needed.
2267 */
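/* With a full table the entries from the insert point slide down one slot
   and the weakest one falls off the end; otherwise the valid count grows. */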
2268 if (cValid >= cTop)
2269 {
2270 if (idx != cTop - 1U)
2271 memmove(&aTop[idx + 1], &aTop[idx], (cTop - idx - 1) * sizeof(aTop[0]));
2272 }
2273 else
2274 {
2275 if (idx != cValid)
2276 memmove(&aTop[idx + 1], &aTop[idx], (cValid - idx) * sizeof(aTop[0]));
2277 cValid++;
2278 }
2279 }
2280 else
2281 {
2282 /* Special case: The first insertion. */
2283 cValid = 1;
2284 idx = 0;
2285 }
2286
2287 /*
2288 * Fill in the new entry.
2289 */
2290 aTop[idx].uSortKey = uSortKey;
2291 aTop[idx].pTb = pTb;
2292 }
2293 }
2294 }
2295 }
2296 }
2297
2298 /*
2299 * Display the result.
2300 */
2301 if (cTop > cValid)
2302 cTop = cValid;
2303 pHlp->pfnPrintf(pHlp, "Displaying the top %u TBs for CPU #%u ordered by %s:\n",
2304 cTop, pVCpu->idCpu, enmTop == kTbTop_MostFrequentlyUsed ? "cUsed" : "msLastUsed");
2305 if (fDisassemble)
2306 pHlp->pfnPrintf(pHlp, "================================================================================\n");
2307
2308 for (uint32_t idx = 0; idx < cTop; idx++)
2309 {
2310 if (fDisassemble && idx)
2311 pHlp->pfnPrintf(pHlp, "\n------------------------------- %u -------------------------------\n", idx);
2312
2313 PCIEMTB const pTb = aTop[idx].pTb;
2314 size_t const idxTbChunk = pTb->idxAllocChunk;
2315 Assert(idxTbChunk < pTbAllocator->cAllocatedChunks);
2316 size_t const idxTbNo = (pTb - &pTbAllocator->aChunks[idxTbChunk].paTbs[0])
2317 + idxTbChunk * pTbAllocator->cTbsPerChunk;
2318 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2319 {
2320# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2321 case IEMTB_F_TYPE_NATIVE:
2322 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - native\n",
2323 idxTbNo, pTb->GCPhysPc, iemR3GetTbFlatPc(pTb), pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2324 if (fDisassemble)
2325 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2326 break;
2327# endif
2328
2329 case IEMTB_F_TYPE_THREADED:
2330 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - threaded\n",
2331 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2332 if (fDisassemble)
2333 iemThreadedDisassembleTb(pTb, pHlp);
2334 break;
2335
2336 default:
2337 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - ???\n",
2338 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2339 break;
2340 }
2341 }
2342}
2343
2344#endif /* VBOX_WITH_IEM_RECOMPILER && !VBOX_VMM_TARGET_ARMV8 */
2345
2346
2347#ifdef VBOX_WITH_DEBUGGER
2348
2349/** @callback_method_impl{FNDBGCCMD,
2350 * Implements the 'iemflushtlb' command. }
2351 */
2352static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2353{
2354 VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
2355 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
2356 if (pVCpu)
2357 {
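/* Run the flush on the owning EMT: queue a priority request for that vCPU
   and wait for IEMTlbInvalidateAllGlobal to complete there. */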
2358 VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAllGlobal, 1, pVCpu);
2359 return VINF_SUCCESS;
2360 }
2361 RT_NOREF(paArgs, cArgs);
2362 return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
2363}
2364
2365
2366/**
2367 * Called by IEMR3Init to register debugger commands.
2368 */
2369static void iemR3RegisterDebuggerCommands(void)
2370{
2371 /*
2372 * Register debugger commands.
2373 */
2374 static DBGCCMD const s_aCmds[] =
2375 {
2376 {
2377 /* .pszCmd = */ "iemflushtlb",
2378 /* .cArgsMin = */ 0,
2379 /* .cArgsMax = */ 0,
2380 /* .paArgDescs = */ NULL,
2381 /* .cArgDescs = */ 0,
2382 /* .fFlags = */ 0,
2383 /* .pfnHandler = */ iemR3DbgFlushTlbs,
2384 /* .pszSyntax = */ "",
2385 /* .pszDescription = */ "Flushes the code and data TLBs"
2386 },
2387 };
2388
2389 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
2390 AssertLogRelRC(rc);
2391}
2392
2393#endif /* VBOX_WITH_DEBUGGER */
2394