VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp@ 108244

Last change on this file since 108244 was 108186, checked in by vboxsync, 3 months ago

VMM/IEM: Removed memory write stats since nobody is using them anymore (consumer was PATM); mark APIs as internal where possible. jiraref:VBP-1431

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 146.2 KB
Line 
1/* $Id: IEMR3.cpp 108186 2025-02-12 15:35:15Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/dbgf.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/ssm.h>
39/** @todo this isn't sustainable. */
40#if defined(VBOX_VMM_TARGET_ARMV8)
41# include "IEMInternal-armv8.h"
42#else
43# include "IEMInternal.h"
44#endif
45#include <VBox/vmm/vm.h>
46#include <VBox/vmm/vmapi.h>
47#include <VBox/err.h>
48#ifdef VBOX_WITH_DEBUGGER
49# include <VBox/dbg.h>
50#endif
51
52#include <iprt/assert.h>
53#include <iprt/getopt.h>
54#ifdef IEM_WITH_TLB_TRACE
55# include <iprt/mem.h>
56#endif
57#include <iprt/string.h>
58
59#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
60# include "IEMN8veRecompiler.h"
61# include "IEMThreadedFunctions.h"
62# include "IEMInline.h"
63#endif
64
65
66/*********************************************************************************************************************************
67* Internal Functions *
68*********************************************************************************************************************************/
69static FNDBGFINFOARGVINT iemR3InfoITlb;
70static FNDBGFINFOARGVINT iemR3InfoDTlb;
71#ifdef IEM_WITH_TLB_TRACE
72static FNDBGFINFOARGVINT iemR3InfoTlbTrace;
73#endif
74#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
75static FNDBGFINFOARGVINT iemR3InfoTb;
76static FNDBGFINFOARGVINT iemR3InfoTbTop;
77#endif
78#ifdef VBOX_WITH_DEBUGGER
79static void iemR3RegisterDebuggerCommands(void);
80#endif
81
82
83#if !defined(VBOX_VMM_TARGET_ARMV8)
84static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
85{
86 switch (enmTargetCpu)
87 {
88#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
89 CASE_RET_STR(IEMTARGETCPU_8086);
90 CASE_RET_STR(IEMTARGETCPU_V20);
91 CASE_RET_STR(IEMTARGETCPU_186);
92 CASE_RET_STR(IEMTARGETCPU_286);
93 CASE_RET_STR(IEMTARGETCPU_386);
94 CASE_RET_STR(IEMTARGETCPU_486);
95 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
96 CASE_RET_STR(IEMTARGETCPU_PPRO);
97 CASE_RET_STR(IEMTARGETCPU_CURRENT);
98#undef CASE_RET_STR
99 default: return "Unknown";
100 }
101}
102#endif
103
104
105#if defined(RT_ARCH_ARM64) && defined(_MSC_VER)
106# pragma warning(disable:4883) /* profile build: IEMR3.cpp(114) : warning C4883: 'IEMR3Init': function size suppresses optimizations*/
107#endif
108
109/**
110 * Initializes the interpreted execution manager.
111 *
 112 * This must be called after CPUM as we're querying information from CPUM about
113 * the guest and host CPUs.
114 *
115 * @returns VBox status code.
116 * @param pVM The cross context VM structure.
117 */
118VMMR3_INT_DECL(int) IEMR3Init(PVM pVM)
119{
120 /*
121 * Read configuration.
122 */
123#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
124 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
125 int rc;
126#endif
127
128#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
129 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
130 * Controls whether the custom VBox specific CPUID host call interface is
131 * enabled or not. */
132# ifdef DEBUG_bird
133 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
134# else
135 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
136# endif
137 AssertLogRelRCReturn(rc, rc);
138#endif
139
140#ifdef VBOX_WITH_IEM_RECOMPILER
141 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
142 * Max number of TBs per EMT. */
143 uint32_t cMaxTbs = 0;
144 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
145 AssertLogRelRCReturn(rc, rc);
146 if (cMaxTbs < _16K || cMaxTbs > _8M)
147 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
148 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
149
150 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32678}
151 * Initial (minimum) number of TBs per EMT in ring-3. */
152 uint32_t cInitialTbs = 0;
153 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
154 AssertLogRelRCReturn(rc, rc);
155 if (cInitialTbs < _16K || cInitialTbs > _8M)
156 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
157 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
158
159 /* Check that the two values makes sense together. Expect user/api to do
160 the right thing or get lost. */
161 if (cInitialTbs > cMaxTbs)
162 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
163 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
164 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
165
166 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
167 * Max executable memory for recompiled code per EMT. */
168 uint64_t cbMaxExec = 0;
169 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
170 AssertLogRelRCReturn(rc, rc);
171 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
172 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
173 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
174 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
175
176 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
177 * The executable memory allocator chunk size. */
178 uint32_t cbChunkExec = 0;
179 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
180 AssertLogRelRCReturn(rc, rc);
181 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
182 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
183 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
184 cbChunkExec, cbChunkExec, _1M, _256M);
185
186 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
187 * The initial executable memory allocator size (per EMT). The value is
188 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
189 uint64_t cbInitialExec = 0;
190 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
191 AssertLogRelRCReturn(rc, rc);
192 if (cbInitialExec > cbMaxExec)
193 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
194 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
195 cbInitialExec, cbInitialExec, cbMaxExec);
196
197 /** @cfgm{/IEM/NativeRecompileAtUsedCount, uint32_t, 16}
198 * The translation block use count value to do native recompilation at.
199 * Set to zero to disable native recompilation. */
200 uint32_t uTbNativeRecompileAtUsedCount = 16;
201 rc = CFGMR3QueryU32Def(pIem, "NativeRecompileAtUsedCount", &uTbNativeRecompileAtUsedCount, 16);
202 AssertLogRelRCReturn(rc, rc);
203
204 /** @cfgm{/IEM/HostICacheInvalidationViaHostAPI, bool, false}
205 * Whether to use any available host OS API for flushing the instruction cache
206 * after completing an translation block. */
207 bool fFlag = false;
208 rc = CFGMR3QueryBoolDef(pIem, "HostICacheInvalidationViaHostAPI", &fFlag, false);
209 AssertLogRelRCReturn(rc, rc);
210 uint8_t fHostICacheInvalidation = fFlag ? IEMNATIVE_ICACHE_F_USE_HOST_API : 0;
211
212 /** @cfgm{/IEM/HostICacheInvalidationEndWithIsb, bool, false}
213 * Whether to include an ISB in the instruction cache invalidation sequence
214 * after completing an translation block. */
215 fFlag = false;
216 rc = CFGMR3QueryBoolDef(pIem, "HostICacheInvalidationEndWithIsb", &fFlag, false);
217 AssertLogRelRCReturn(rc, rc);
218 if (fFlag)
219 fHostICacheInvalidation |= IEMNATIVE_ICACHE_F_END_WITH_ISH;
220
221#endif /* VBOX_WITH_IEM_RECOMPILER*/
222
223 /*
224 * Initialize per-CPU data and register statistics.
225 */
226#if 1
227 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
228 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
229#else
230 uint64_t const uInitialTlbRevision = UINT64_C(0) + (IEMTLB_REVISION_INCR * 4U);
231 uint64_t const uInitialTlbPhysRev = UINT64_C(0) + (IEMTLB_PHYS_REV_INCR * 4U);
232#endif
233
234 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
235 {
236 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
237 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do it's job w/o instruction stats) */
238
239 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
240#ifndef VBOX_VMM_TARGET_ARMV8
241 pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal = pVCpu->iem.s.DataTlb.uTlbRevisionGlobal = uInitialTlbRevision;
242#endif
243 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
244#ifndef VBOX_VMM_TARGET_ARMV8
245 pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
246 pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
247 pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
248 pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
249#endif
250
251#ifndef VBOX_VMM_TARGET_ARMV8
252 pVCpu->iem.s.cTbsTillNextTimerPoll = 128;
253 pVCpu->iem.s.cTbsTillNextTimerPollPrev = 128;
254#endif
255
256 /*
257 * Host and guest CPU information.
258 */
259 if (idCpu == 0)
260 {
261 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
262#if !defined(VBOX_VMM_TARGET_ARMV8)
263 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
264 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
265 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
266# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
267 if (pVCpu->iem.s.enmCpuVendor == CPUMGetHostCpuVendor(pVM))
268 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
269 else
270# endif
271 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
272#else
273 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
274 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
275#endif
276
277#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
278 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
279 {
280 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
281 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
282 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
283 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
284 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
285 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
286 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
287 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
288 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
289 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
290 }
291 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
292 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
293 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
294#else
295 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
296 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
297 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
298#endif
299 }
300 else
301 {
302 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
303 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
304 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
305#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
306 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
307#endif
308 }
309
310 /*
311 * Mark all buffers free.
312 */
313 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
314 while (iMemMap-- > 0)
315 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
316
317#ifdef VBOX_WITH_IEM_RECOMPILER
318 /*
319 * Recompiler state and configuration distribution.
320 */
321 pVCpu->iem.s.uRegFpCtrl = IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED;
322 pVCpu->iem.s.uTbNativeRecompileAtUsedCount = uTbNativeRecompileAtUsedCount;
323 pVCpu->iem.s.fHostICacheInvalidation = fHostICacheInvalidation;
324#endif
325
326#ifdef IEM_WITH_TLB_TRACE
327 /*
328 * Allocate trace buffer.
329 */
330 pVCpu->iem.s.idxTlbTraceEntry = 0;
331 pVCpu->iem.s.cTlbTraceEntriesShift = 16;
332 pVCpu->iem.s.paTlbTraceEntries = (PIEMTLBTRACEENTRY)RTMemPageAlloc( RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift)
333 * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
334 AssertLogRelReturn(pVCpu->iem.s.paTlbTraceEntries, VERR_NO_PAGE_MEMORY);
335#endif
336 }
337
338
339#ifdef VBOX_WITH_IEM_RECOMPILER
340 /*
341 * Initialize the TB allocator and cache (/ hash table).
342 *
343 * This is done by each EMT to try get more optimal thread/numa locality of
344 * the allocations.
345 */
346 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
347 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
348 AssertLogRelRCReturn(rc, rc);
349#endif
350
351 /*
352 * Register statistics.
353 */
354 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
355 {
356#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
357 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
358 char szPat[128];
359 RT_NOREF_PV(szPat); /* lazy bird */
360 char szVal[128];
361 RT_NOREF_PV(szVal); /* lazy bird */
362
363 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
364 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
365 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
366 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
367 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
368 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
369 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
370 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
371 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
372 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
373 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
374 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
375 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
376 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
377 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
378 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
379 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
380 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
381
382 /* Code TLB: */
383 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
384 "Code TLB non-global revision", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobal", idCpu);
385 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
386 "Code TLB global revision", "/IEM/CPU%u/Tlb/Code/RevisionGlobal", idCpu);
387 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
388 "Code TLB non-global flushes", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobalFlushes", idCpu);
389 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
390 "Code TLB global flushes", "/IEM/CPU%u/Tlb/Code/RevisionGlobalFlushes", idCpu);
391 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
392 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/RevisionRollovers", idCpu);
393
394 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
395 "Code TLB physical revision", "/IEM/CPU%u/Tlb/Code/PhysicalRevision", idCpu);
396 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
397 "Code TLB revision flushes", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionFlushes", idCpu);
398 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
399 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionRollovers", idCpu);
400
401 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
402 "Code TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageGlobalCurLoads", idCpu);
403 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
404 "Code TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalFirstTag", idCpu);
405 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
406 "Code TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalLastTag", idCpu);
407
408 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
409 "Code TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalCurLoads", idCpu);
410 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
411 "Code TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalFirstTag", idCpu);
412 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
413 "Code TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalLastTag", idCpu);
414
415 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
416 "Code TLB page invalidation requests", "/IEM/CPU%u/Tlb/Code/InvlPg", idCpu);
417 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
418 "Code TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeGlobal", idCpu);
419 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
420 "Code TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeNonGlobal", idCpu);
421
422 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
423 "Code TLB misses", "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
424 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
425 "Code TLB global loads", "/IEM/CPU%u/Tlb/Code/Misses/GlobalLoads", idCpu);
426 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
427 "Code TLB slow read path", "/IEM/CPU%u/Tlb/Code/SlowReads", idCpu);
428# ifdef IEM_WITH_TLB_STATISTICS
429 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
430 "Code TLB hits (non-native)", "/IEM/CPU%u/Tlb/Code/Hits/Other", idCpu);
431# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
432 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
433 "Code TLB native hits on new page", "/IEM/CPU%u/Tlb/Code/Hits/New-Page", idCpu);
434 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
435 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/Tlb/Code/Hits/New-Page-With-Offset", idCpu);
436# endif
437
438 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits/*", idCpu);
439 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB hits",
440 "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
441
442 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Hits|/IEM/CPU%u/Tlb/Code/Misses", idCpu, idCpu);
443 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB lookups (sum of hits and misses)",
444 "/IEM/CPU%u/Tlb/Code/AllLookups", idCpu);
445
446 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
447 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
448 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
449 "Code TLB actual miss rate", "/IEM/CPU%u/Tlb/Code/RateMisses", idCpu);
450
451# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
452 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
453 "Code TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
454 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Tag", idCpu);
455 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
456 "Code TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
457 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
458 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
459 "Code TLB misses in native code: Alignment [not directly included grand parent sum]",
460 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Alignment", idCpu);
461 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
462 "Code TLB misses in native code: Cross page [not directly included grand parent sum]",
463 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/CrossPage", idCpu);
464 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
465 "Code TLB misses in native code: Non-canonical [not directly included grand parent sum]",
466 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/NonCanonical", idCpu);
467
468 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
469 "Code TLB native misses on new page",
470 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page", idCpu);
471 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
472 "Code TLB native misses on new page w/ offset",
473 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page-With-Offset", idCpu);
474# endif
475# endif /* IEM_WITH_TLB_STATISTICS */
476
477 /* Data TLB organized as best we can... */
478 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
479 "Data TLB non-global revision", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobal", idCpu);
480 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
481 "Data TLB global revision", "/IEM/CPU%u/Tlb/Data/RevisionGlobal", idCpu);
482 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
483 "Data TLB non-global flushes", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobalFlushes", idCpu);
484 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
485 "Data TLB global flushes", "/IEM/CPU%u/Tlb/Data/RevisionGlobalFlushes", idCpu);
486 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
487 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/RevisionRollovers", idCpu);
488
489 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
490 "Data TLB physical revision", "/IEM/CPU%u/Tlb/Data/PhysicalRevision", idCpu);
491 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
492 "Data TLB revision flushes", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionFlushes", idCpu);
493 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
494 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionRollovers", idCpu);
495
496 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
497 "Data TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageGlobalCurLoads", idCpu);
498 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
499 "Data TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalFirstTag", idCpu);
500 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
501 "Data TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalLastTag", idCpu);
502
503 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
504 "Data TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalCurLoads", idCpu);
505 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
506 "Data TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalFirstTag", idCpu);
507 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
508 "Data TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalLastTag", idCpu);
509
510 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
511 "Data TLB page invalidation requests", "/IEM/CPU%u/Tlb/Data/InvlPg", idCpu);
512 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
513 "Data TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeGlobal", idCpu);
514 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
515 "Data TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeNonGlobal", idCpu);
516
517 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
518 "Data TLB core misses (iemMemMap, direct iemMemMapJmp (not safe path))",
519 "/IEM/CPU%u/Tlb/Data/Misses/Core", idCpu);
520 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
521 "Data TLB global loads",
522 "/IEM/CPU%u/Tlb/Data/Misses/Core/GlobalLoads", idCpu);
523 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
524 "Data TLB safe read path (inline/native misses going to iemMemMapJmp)",
525 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Reads", idCpu);
526 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
527 "Data TLB safe write path (inline/native misses going to iemMemMapJmp)",
528 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Writes", idCpu);
529 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/*", idCpu);
530 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB misses",
531 "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
532
533 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/Safe/*", idCpu);
534 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB actual safe path calls (read + write)",
535 "/IEM/CPU%u/Tlb/Data/Misses/Safe", idCpu);
536 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
537 "Data TLB hits in iemMemMapJmp - not part of safe-path total",
538 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartHits", idCpu);
539 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
540 "Data TLB misses in iemMemMapJmp - not part of safe-path total",
541 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses", idCpu);
542 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
543 "Data TLB global loads",
544 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses/GlobalLoads", idCpu);
545
# ifdef IEM_WITH_TLB_STATISTICS
# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
        /* Per-cause breakdown of data TLB misses taken in native (recompiled) code.
           These are purely informational and not folded into the parent miss sum.
           (Fixed typo in the FlagsAndPhysRev description: "mistmatch" -> "mismatch".) */
        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
                        "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Tag", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB misses in native code: Flags or physical revision mismatch [not directly included grand parent sum]",
                        "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB misses in native code: Alignment [not directly included grand parent sum]",
                        "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Alignment", idCpu);
        /* These two are U32 counters, matching the width of the underlying members. */
        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB misses in native code: Cross page [not directly included grand parent sum]",
                        "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/CrossPage", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB misses in native code: Non-canonical [not directly included grand parent sum]",
                        "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/NonCanonical", idCpu);
# endif
# endif
565
# ifdef IEM_WITH_TLB_STATISTICS
        /* Data TLB hit counters, split by the code path that scored the hit. */
        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB core hits (iemMemMap, direct iemMemMapJmp (not safe path))",
                        "/IEM/CPU%u/Tlb/Data/Hits/Core", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInlineCodeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB hits in IEMAllMemRWTmplInline.cpp.h",
                        "/IEM/CPU%u/Tlb/Data/Hits/Inline", idCpu);
# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
        /* Hits scored from recompiled native code, by access kind. */
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB native stack access hits",
                        "/IEM/CPU%u/Tlb/Data/Hits/Native/Stack", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB native data fetch hits",
                        "/IEM/CPU%u/Tlb/Data/Hits/Native/Fetch", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB native data store hits",
                        "/IEM/CPU%u/Tlb/Data/Hits/Native/Store", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB native mapped data hits",
                        "/IEM/CPU%u/Tlb/Data/Hits/Native/Mapped", idCpu);
# endif
        /* Aggregate of all the hit counters above. */
        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/*", idCpu);
        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits",
                          "/IEM/CPU%u/Tlb/Data/Hits", idCpu);

# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
        /* Aggregate of just the native-code hit counters. */
        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/Native/*", idCpu);
        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits from native code",
                          "/IEM/CPU%u/Tlb/Data/Hits/Native", idCpu);
# endif
596
597 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Hits|/IEM/CPU%u/Tlb/Data/Misses", idCpu, idCpu);
598 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB lookups (sum of hits and misses)",
599 "/IEM/CPU%u/Tlb/Data/AllLookups", idCpu);
600
601 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
602 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
603 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
604 "Data TLB actual miss rate", "/IEM/CPU%u/Tlb/Data/RateMisses", idCpu);
605
606# endif /* IEM_WITH_TLB_STATISTICS */
607
608
#ifdef VBOX_WITH_IEM_RECOMPILER
        /* Basic recompiler execution counters: how many TBs ran natively vs threaded,
           and how often threaded execution was broken off early. */
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                        "Times threaded TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecThreadedBreaks", idCpu);
# ifdef VBOX_WITH_STATISTICS
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                        "Times threaded TB execution was interrupted/broken off on a call with lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithLookup", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithoutLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                        "Times threaded TB execution was interrupted/broken off on a call without lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithoutLookup", idCpu);
# endif

# ifdef VBOX_WITH_STATISTICS
        /* Timer polling: profiling and interval-calculation outcome counters. */
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPoll, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
                        "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollRun, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
                        "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll/Running", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollUnchanged, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Timer polling interval unchanged", "/IEM/CPU%u/re/TimerPoll/Unchanged", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollTiny, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Timer polling interval tiny", "/IEM/CPU%u/re/TimerPoll/Tiny", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollDefaultCalc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Timer polling interval calculated using defaults", "/IEM/CPU%u/re/TimerPoll/DefaultCalc", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollMax, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Timer polling interval maxed out", "/IEM/CPU%u/re/TimerPoll/Max", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorDivision, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE,
                        "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorDivision", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorMultiplication, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                        "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorMultiplication", idCpu);
# endif
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbsTillNextTimerPollPrev, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Timer polling interval (in TBs)", "/IEM/CPU%u/re/TimerPollInterval", idCpu);

        /* Translation block allocator statistics (chunked allocator for TBs). */
        PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
        STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
                        "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
        STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
                        "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
# ifdef VBOX_WITH_STATISTICS
        STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
                        "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
# endif
        STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
                        "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/ExecMem/TbPruningNative", idCpu);
        STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
        STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
        STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
        STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
        STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
        STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
        STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);

        /* Translation block cache (hash table) statistics. */
        PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
        STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);

        STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                        "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
        STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHitsViaTbLookupTable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                        "Translation block lookup hits via TB lookup table associated with the previous TB", "/IEM/CPU%u/re/cTbLookupHitsViaTbLookupTable", idCpu);
        STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                        "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
        STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                        "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
# ifdef VBOX_WITH_STATISTICS
        STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
                        "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
# endif

        /* Per-TB composition profiles for threaded TBs. */
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
                        "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
                        "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLookupEntries, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
                        "TB lookup table entries per threaded translation block", "/IEM/CPU%u/re/ThrdLookupEntriesPerTb", idCpu);
693
694 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
695 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
696 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckTimersBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
697 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckTimersBreaks", idCpu);
698 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
699 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
700 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
701 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
702 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
703 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
704
        /* Loop detection inside/across TBs during recompilation. */
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Detected loop full TB", "/IEM/CPU%u/re/LoopFullTbDetected", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected2, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Detected loop full TB but looping back to before the first TB instruction",
                        "/IEM/CPU%u/re/LoopFullTbDetected2", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopInTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Detected loop within TB", "/IEM/CPU%u/re/LoopInTbDetected", idCpu);

        /* Executable-memory allocator failure counter (native code buffers). */
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeExecMemInstrBufAllocFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of times the exec memory allocator failed to allocate a large enough buffer",
                        "/IEM/CPU%u/re/NativeExecMemInstrBufAllocFailed", idCpu);
716
717 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
718 "Number of threaded calls per TB that have been properly recompiled to native code",
719 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
720 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
721 "Number of threaded calls per TB that could not be recompiler to native code",
722 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
723 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
724 "Number of threaded calls that could not be recompiler to native code",
725 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
726
727 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
728 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
729 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
730 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
731
732# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
733# ifdef VBOX_WITH_STATISTICS
734 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
735 "Number of calls to iemNativeRegAllocFindFree.",
736 "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
737# endif
738 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
739 "Number of times iemNativeRegAllocFindFree needed to free a variable.",
740 "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
741# ifdef VBOX_WITH_STATISTICS
742 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
743 "Number of times iemNativeRegAllocFindFree did not needed to free any variables.",
744 "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
745 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
746 "Times liveness info freeed up shadowed guest registers in iemNativeRegAllocFindFree.",
747 "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
748 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
749 "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
750 "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
751
752# define REG_NATIVE_EFL_GROUP(a_Lower, a_Camel) do { \
753 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponed ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
754 "Postponed all status flag updating, " #a_Lower " instructions", \
755 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
756 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkipped ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
757 "Skipped all status flag updating, " #a_Lower " instructions", \
758 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
759 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflTotal ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
760 "Total number of " #a_Lower " intructions with status flag updating", \
761 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
762 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
763 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
764 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
765 "Postponed all status flag updating, " #a_Lower " instructions, percentage", \
766 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "PostponedPct", idCpu); \
767 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
768 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
769 "Skipped all status flag updating, " #a_Lower " instructions, percentage", \
770 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "SkippedPct", idCpu); \
771 } while (0)
772 REG_NATIVE_EFL_GROUP(arithmetic, Arithmetic);
773 REG_NATIVE_EFL_GROUP(logical, Logical);
774 REG_NATIVE_EFL_GROUP(shift, Shift);
775# undef REG_NATIVE_EFL_GROUP
776
777 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponedEmits, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
778 "Postponed EFLAGS calculation emits", "/IEM/CPU%u/re/NativeEFlags/ZZEmits", idCpu);
779
        /* Per-status-flag liveness outcome counters: how often each EFLAGS bit's
           update could be skipped entirely. */
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippable", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippable", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippable", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippable", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippable", idCpu);

        /* ... and how often the update was actually required. */
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfRequired", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfRequired", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfRequired", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfRequired", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfRequired", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfRequired", idCpu);

# ifdef IEMLIVENESS_EXTENDED_LAYOUT
        /* Extended liveness layout also tracks "maybe delayable" outcomes. */
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfDelayable", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfDelayable", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfDelayable", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfDelayable", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfDelayable", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfDelayable", idCpu);
# endif

        /* Sum up all status bits.  NOTE(review): the lowercase 'total' prefix makes
           these sort after the per-flag entries ('?f...' only matches the flag names);
           an earlier comment mentioned a '_' sorting hack which no longer appears. */
        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fSkippable*", idCpu);
        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total skippable EFLAGS status bit updating",
                          "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);

        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fRequired*", idCpu);
        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total required STATUS status bit updating",
                          "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);

# ifdef IEMLIVENESS_EXTENDED_LAYOUT
        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fDelayable*", idCpu);
        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total potentially delayable STATUS status bit updating",
                          "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
# endif

        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?f*", idCpu);
        STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total STATUS status bit events of any kind",
                          "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);

        /* Corresponding ratios / percentages of the totals. */
        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
        RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);
        STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
                               "Total skippable EFLAGS status bit updating percentage",
                               "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippablePct", idCpu);

        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
        RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);
        STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
                               "Total required EFLAGS status bit updating percentage",
                               "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequiredPct", idCpu);

# ifdef IEMLIVENESS_EXTENDED_LAYOUT
        /* szPat still holds the totalTotal name from just above. */
        RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
        STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
                               "Total potentially delayable EFLAGS status bit updating percentage",
                               "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayablePct", idCpu);
# endif

        /* Ratios of individual bits.  offFlagChar indexes the flag letter ('C' in
           "Cf...") within both buffers so only that one character needs patching
           per flag; the first assignment of 'C' is a no-op kept for symmetry. */
        size_t const offFlagChar = RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/Cf*", idCpu) - 3;
        Assert(szPat[offFlagChar] == 'C');
        RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
        Assert(szVal[offFlagChar] == 'C');
        szPat[offFlagChar] = szVal[offFlagChar] = 'C'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.CF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippablePct", idCpu);
        szPat[offFlagChar] = szVal[offFlagChar] = 'P'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.PF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippablePct", idCpu);
        szPat[offFlagChar] = szVal[offFlagChar] = 'A'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.AF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippablePct", idCpu);
        szPat[offFlagChar] = szVal[offFlagChar] = 'Z'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.ZF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippablePct", idCpu);
        szPat[offFlagChar] = szVal[offFlagChar] = 'S'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.SF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippablePct", idCpu);
        szPat[offFlagChar] = szVal[offFlagChar] = 'O'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.OF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippablePct", idCpu);

        /* PC updates total and skipped, with PCT ratio. */
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates", "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
        RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
        RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
        STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
                               "Delayed RIP updating percentage",
                               "/IEM/CPU%u/re/NativePcUpdateDelayed_StatusDelayedPct", idCpu);

# endif /* VBOX_WITH_STATISTICS */
# ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEndIfOtherBranchDirty, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "IEM_MC_ENDIF flushing dirty shadow registers for other branch (not good).",
                        "/IEM/CPU%u/re/NativeEndIfOtherBranchDirty", idCpu);
# endif
869# ifdef VBOX_WITH_STATISTICS
870 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
871 "Number of calls to iemNativeSimdRegAllocFindFree.",
872 "/IEM/CPU%u/re/NativeSimdRegFindFree", idCpu);
873 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
874 "Number of times iemNativeSimdRegAllocFindFree needed to free a variable.",
875 "/IEM/CPU%u/re/NativeSimdRegFindFreeVar", idCpu);
876 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
877 "Number of times iemNativeSimdRegAllocFindFree did not needed to free any variables.",
878 "/IEM/CPU%u/re/NativeSimdRegFindFreeNoVar", idCpu);
879 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
880 "Times liveness info freeed up shadowed guest registers in iemNativeSimdRegAllocFindFree.",
881 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessUnshadowed", idCpu);
882 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
883 "Times liveness info helped finding the return register in iemNativeSimdRegAllocFindFree.",
884 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessHelped", idCpu);
885
        /* IEM_MC_MAYBE_RAISE_* exception-check elision: how many checks could have
           been emitted (Potential) vs how many were actually left out (Omitted). */
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks",
                        "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckPotential", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks",
                        "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckPotential", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks",
                        "/IEM/CPU%u/re/NativeMaybeSseXcptCheckPotential", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks",
                        "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckPotential", idCpu);

        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted",
                        "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckOmitted", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted",
                        "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckOmitted", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted",
                        "/IEM/CPU%u/re/NativeMaybeSseXcptCheckOmitted", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted",
                        "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckOmitted", idCpu);

        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbFinished, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of times the TB finishes execution completely",
                        "/IEM/CPU%u/re/NativeTbFinished", idCpu);
# endif /* VBOX_WITH_STATISTICS */
908 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreak, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
909 "Number of times the TB finished through the ReturnBreak label",
910 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak", idCpu);
911 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreakFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
912 "Number of times the TB finished through the ReturnBreak label",
913 "/IEM/CPU%u/re/NativeTbExit/ReturnBreakFF", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnWithFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of times the TB finished through the ReturnWithFlags label",
                        "/IEM/CPU%u/re/NativeTbExit/ReturnWithFlags", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnOtherStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of times the TB finished with some other status value",
                        "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLongJump, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of times the TB finished via long jump / throw",
                        "/IEM/CPU%u/re/NativeTbExit/LongJumps", idCpu);
        /* These end up returning VINF_IEM_REEXEC_BREAK and are thus already counted under NativeTbExit/ReturnBreak:
           NOTE(review): the last two reuse the StatCheckNeedCsLimChecking / StatCheckBranchMisses
           counters already registered above under /re/CheckTb* — presumably intentional aliasing
           of one counter under two names; confirm STAM allows duplicate sample addresses. */
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitObsoleteTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of times the TB finished through the ObsoleteTb label",
                        "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/ObsoleteTb", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of times the TB finished through the NeedCsLimChecking label",
                        "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/NeedCsLimChecking", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of times the TB finished through the CheckBranchMiss label",
                        "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/CheckBranchMiss", idCpu);
        /* Raising stuff will either increment NativeTbExit/LongJumps or NativeTbExit/ReturnOtherStatus
           depending on whether VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is defined: */
# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
#  define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus/"
# else
#  define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/LongJumps/"
# endif
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseDe, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of times the TB finished raising a #DE exception",
                        RAISE_PREFIX "RaiseDe", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of times the TB finished raising a #UD exception",
                        RAISE_PREFIX "RaiseUd", idCpu);
946 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
947 "Number of times the TB finished raising a SSE related exception",
948 RAISE_PREFIX "RaiseSseRelated", idCpu);
949 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseAvxRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
950 "Number of times the TB finished raising a AVX related exception",
951 RAISE_PREFIX "RaiseAvxRelated", idCpu);
952 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseAvxFpRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
953 "Number of times the TB finished raising a SSE/AVX floating point related exception",
954 RAISE_PREFIX "RaiseSseAvxFpRelated", idCpu);
955 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseNm, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
956 "Number of times the TB finished raising a #NM exception",
957 RAISE_PREFIX "RaiseNm", idCpu);
958 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseGp0, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
959 "Number of times the TB finished raising a #GP(0) exception",
960 RAISE_PREFIX "RaiseGp0", idCpu);
961 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseMf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
962 "Number of times the TB finished raising a #MF exception",
963 RAISE_PREFIX "RaiseMf", idCpu);
964 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseXf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
965 "Number of times the TB finished raising a #XF exception",
966 RAISE_PREFIX "RaiseXf", idCpu);
967
968# ifdef VBOX_WITH_STATISTICS
969 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLoopFullTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
970 "Number of full TB loops.",
971 "/IEM/CPU%u/re/NativeTbExit/LoopFullTb", idCpu);
972# endif
973
974 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
975 "Direct linking #1 with IRQ check succeeded",
976 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1Irq", idCpu);
977 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
978 "Direct linking #1 w/o IRQ check succeeded",
979 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1NoIrq", idCpu);
980# ifdef VBOX_WITH_STATISTICS
981 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
982 "Direct linking #1 failed: No TB in lookup table",
983 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1NoTb", idCpu);
984 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
985 "Direct linking #1 failed: GCPhysPc mismatch",
986 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchGCPhysPc", idCpu);
987 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
988 "Direct linking #1 failed: TB flags mismatch",
989 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchFlags", idCpu);
990 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
991 "Direct linking #1 failed: IRQ or FF pending",
992 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1PendingIrq", idCpu);
993# endif
994
995 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
996 "Direct linking #2 with IRQ check succeeded",
997 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2Irq", idCpu);
998 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
999 "Direct linking #2 w/o IRQ check succeeded",
1000 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2NoIrq", idCpu);
1001# ifdef VBOX_WITH_STATISTICS
1002 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1003 "Direct linking #2 failed: No TB in lookup table",
1004 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2NoTb", idCpu);
1005 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1006 "Direct linking #2 failed: GCPhysPc mismatch",
1007 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchGCPhysPc", idCpu);
1008 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1009 "Direct linking #2 failed: TB flags mismatch",
1010 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchFlags", idCpu);
1011 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1012 "Direct linking #2 failed: IRQ or FF pending",
1013 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2PendingIrq", idCpu);
1014# endif
1015
1016 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeTbExit/*", idCpu); /* only immediate children, no sub folders */
1017 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
1018 "Number of times native TB execution finished before the end (not counting thrown memory++ exceptions)",
1019 "/IEM/CPU%u/re/NativeTbExit", idCpu);
1020
1021
1022# endif /* VBOX_WITH_IEM_NATIVE_RECOMPILER */
1023
1024
1025# ifdef VBOX_WITH_STATISTICS
1026 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1027 "iemMemMapJmp calls", "/IEM/CPU%u/iemMemMapJmp", idCpu);
1028 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapNoJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1029 "iemMemMap calls", "/IEM/CPU%u/iemMemMapNoJmp", idCpu);
1030 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferCrossPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1031 "iemMemBounceBufferMapCrossPage calls", "/IEM/CPU%u/iemMemMapBounceBufferCrossPage", idCpu);
1032 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferMapPhys, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1033 "iemMemBounceBufferMapPhys calls", "/IEM/CPU%u/iemMemMapBounceBufferMapPhys", idCpu);
1034# endif
1035
1036
1037#endif /* VBOX_WITH_IEM_RECOMPILER */
1038
1039 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
1040 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1041 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
1042 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
1043 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1044 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
1045
1046# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
1047 /* Instruction statistics: */
1048# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
1049 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1050 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
1051 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1052 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
1053# include "IEMInstructionStatisticsTmpl.h"
1054# undef IEM_DO_INSTR_STAT
1055# endif
1056
1057# if defined(VBOX_WITH_STATISTICS) && defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1058 /* Threaded function statistics: */
1059 for (unsigned i = 1; i < (unsigned)kIemThreadedFunc_End; i++)
1060 STAMR3RegisterF(pVM, &pVCpu->iem.s.acThreadedFuncStats[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED,
1061 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/ThrdFuncs/%s", idCpu, g_apszIemThreadedFunctionStats[i]);
1062# endif
1063
1064
1065 for (unsigned i = 1; i < RT_ELEMENTS(pVCpu->iem.s.aStatAdHoc); i++)
1066 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatAdHoc[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
1067 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/AdHoc/%02u", idCpu, i);
1068
1069#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
1070 }
1071
1072#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1073 /*
1074 * Register the per-VM VMX APIC-access page handler type.
1075 */
1076 if (pVM->cpum.ro.GuestFeatures.fVmx)
1077 {
1078 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
1079 iemVmxApicAccessPageHandler,
1080 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
1081 AssertLogRelRCReturn(rc, rc);
1082 }
1083#endif
1084
1085 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1086 DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM instruction TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1087#ifdef IEM_WITH_TLB_TRACE
1088 DBGFR3InfoRegisterInternalArgv(pVM, "tlbtrace", "IEM TLB trace log", iemR3InfoTlbTrace, DBGFINFO_FLAGS_RUN_ON_EMT);
1089#endif
1090#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1091 DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
1092 DBGFR3InfoRegisterInternalArgv(pVM, "tbtop", "IEM translation blocks most used or most recently used",
1093 iemR3InfoTbTop, DBGFINFO_FLAGS_RUN_ON_EMT);
1094#endif
1095#ifdef VBOX_WITH_DEBUGGER
1096 iemR3RegisterDebuggerCommands();
1097#endif
1098
1099 return VINF_SUCCESS;
1100}
1101
1102
1103VMMR3_INT_DECL(int) IEMR3Term(PVM pVM)
1104{
1105 NOREF(pVM);
1106#ifdef IEM_WITH_TLB_TRACE
1107 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1108 {
1109 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
1110 RTMemPageFree(pVCpu->iem.s.paTlbTraceEntries,
1111 RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift) * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
1112 }
1113#endif
1114#if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER) && defined(VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING)
1115 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1116 iemThreadedSaveTbForProfilingCleanup(pVM->apCpusR3[idCpu]);
1117#endif
1118 return VINF_SUCCESS;
1119}
1120
1121
1122VMMR3_INT_DECL(void) IEMR3Relocate(PVM pVM)
1123{
1124 RT_NOREF(pVM);
1125}
1126
1127
1128/**
1129 * Gets the name of a generic IEM exit code.
1130 *
1131 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
1132 * @param uExit The IEM exit to name.
1133 */
1134VMMR3_INT_DECL(const char *) IEMR3GetExitName(uint32_t uExit)
1135{
1136 static const char * const s_apszNames[] =
1137 {
1138 /* external interrupts */
1139 "ExtInt 00h", "ExtInt 01h", "ExtInt 02h", "ExtInt 03h", "ExtInt 04h", "ExtInt 05h", "ExtInt 06h", "ExtInt 07h",
1140 "ExtInt 08h", "ExtInt 09h", "ExtInt 0ah", "ExtInt 0bh", "ExtInt 0ch", "ExtInt 0dh", "ExtInt 0eh", "ExtInt 0fh",
1141 "ExtInt 10h", "ExtInt 11h", "ExtInt 12h", "ExtInt 13h", "ExtInt 14h", "ExtInt 15h", "ExtInt 16h", "ExtInt 17h",
1142 "ExtInt 18h", "ExtInt 19h", "ExtInt 1ah", "ExtInt 1bh", "ExtInt 1ch", "ExtInt 1dh", "ExtInt 1eh", "ExtInt 1fh",
1143 "ExtInt 20h", "ExtInt 21h", "ExtInt 22h", "ExtInt 23h", "ExtInt 24h", "ExtInt 25h", "ExtInt 26h", "ExtInt 27h",
1144 "ExtInt 28h", "ExtInt 29h", "ExtInt 2ah", "ExtInt 2bh", "ExtInt 2ch", "ExtInt 2dh", "ExtInt 2eh", "ExtInt 2fh",
1145 "ExtInt 30h", "ExtInt 31h", "ExtInt 32h", "ExtInt 33h", "ExtInt 34h", "ExtInt 35h", "ExtInt 36h", "ExtInt 37h",
1146 "ExtInt 38h", "ExtInt 39h", "ExtInt 3ah", "ExtInt 3bh", "ExtInt 3ch", "ExtInt 3dh", "ExtInt 3eh", "ExtInt 3fh",
1147 "ExtInt 40h", "ExtInt 41h", "ExtInt 42h", "ExtInt 43h", "ExtInt 44h", "ExtInt 45h", "ExtInt 46h", "ExtInt 47h",
1148 "ExtInt 48h", "ExtInt 49h", "ExtInt 4ah", "ExtInt 4bh", "ExtInt 4ch", "ExtInt 4dh", "ExtInt 4eh", "ExtInt 4fh",
1149 "ExtInt 50h", "ExtInt 51h", "ExtInt 52h", "ExtInt 53h", "ExtInt 54h", "ExtInt 55h", "ExtInt 56h", "ExtInt 57h",
1150 "ExtInt 58h", "ExtInt 59h", "ExtInt 5ah", "ExtInt 5bh", "ExtInt 5ch", "ExtInt 5dh", "ExtInt 5eh", "ExtInt 5fh",
1151 "ExtInt 60h", "ExtInt 61h", "ExtInt 62h", "ExtInt 63h", "ExtInt 64h", "ExtInt 65h", "ExtInt 66h", "ExtInt 67h",
1152 "ExtInt 68h", "ExtInt 69h", "ExtInt 6ah", "ExtInt 6bh", "ExtInt 6ch", "ExtInt 6dh", "ExtInt 6eh", "ExtInt 6fh",
1153 "ExtInt 70h", "ExtInt 71h", "ExtInt 72h", "ExtInt 73h", "ExtInt 74h", "ExtInt 75h", "ExtInt 76h", "ExtInt 77h",
1154 "ExtInt 78h", "ExtInt 79h", "ExtInt 7ah", "ExtInt 7bh", "ExtInt 7ch", "ExtInt 7dh", "ExtInt 7eh", "ExtInt 7fh",
1155 "ExtInt 80h", "ExtInt 81h", "ExtInt 82h", "ExtInt 83h", "ExtInt 84h", "ExtInt 85h", "ExtInt 86h", "ExtInt 87h",
1156 "ExtInt 88h", "ExtInt 89h", "ExtInt 8ah", "ExtInt 8bh", "ExtInt 8ch", "ExtInt 8dh", "ExtInt 8eh", "ExtInt 8fh",
1157 "ExtInt 90h", "ExtInt 91h", "ExtInt 92h", "ExtInt 93h", "ExtInt 94h", "ExtInt 95h", "ExtInt 96h", "ExtInt 97h",
1158 "ExtInt 98h", "ExtInt 99h", "ExtInt 9ah", "ExtInt 9bh", "ExtInt 9ch", "ExtInt 9dh", "ExtInt 9eh", "ExtInt 9fh",
1159 "ExtInt a0h", "ExtInt a1h", "ExtInt a2h", "ExtInt a3h", "ExtInt a4h", "ExtInt a5h", "ExtInt a6h", "ExtInt a7h",
1160 "ExtInt a8h", "ExtInt a9h", "ExtInt aah", "ExtInt abh", "ExtInt ach", "ExtInt adh", "ExtInt aeh", "ExtInt afh",
1161 "ExtInt b0h", "ExtInt b1h", "ExtInt b2h", "ExtInt b3h", "ExtInt b4h", "ExtInt b5h", "ExtInt b6h", "ExtInt b7h",
1162 "ExtInt b8h", "ExtInt b9h", "ExtInt bah", "ExtInt bbh", "ExtInt bch", "ExtInt bdh", "ExtInt beh", "ExtInt bfh",
1163 "ExtInt c0h", "ExtInt c1h", "ExtInt c2h", "ExtInt c3h", "ExtInt c4h", "ExtInt c5h", "ExtInt c6h", "ExtInt c7h",
1164 "ExtInt c8h", "ExtInt c9h", "ExtInt cah", "ExtInt cbh", "ExtInt cch", "ExtInt cdh", "ExtInt ceh", "ExtInt cfh",
1165 "ExtInt d0h", "ExtInt d1h", "ExtInt d2h", "ExtInt d3h", "ExtInt d4h", "ExtInt d5h", "ExtInt d6h", "ExtInt d7h",
1166 "ExtInt d8h", "ExtInt d9h", "ExtInt dah", "ExtInt dbh", "ExtInt dch", "ExtInt ddh", "ExtInt deh", "ExtInt dfh",
1167 "ExtInt e0h", "ExtInt e1h", "ExtInt e2h", "ExtInt e3h", "ExtInt e4h", "ExtInt e5h", "ExtInt e6h", "ExtInt e7h",
1168 "ExtInt e8h", "ExtInt e9h", "ExtInt eah", "ExtInt ebh", "ExtInt ech", "ExtInt edh", "ExtInt eeh", "ExtInt efh",
1169 "ExtInt f0h", "ExtInt f1h", "ExtInt f2h", "ExtInt f3h", "ExtInt f4h", "ExtInt f5h", "ExtInt f6h", "ExtInt f7h",
1170 "ExtInt f8h", "ExtInt f9h", "ExtInt fah", "ExtInt fbh", "ExtInt fch", "ExtInt fdh", "ExtInt feh", "ExtInt ffh",
1171 /* software interrups */
1172 "SoftInt 00h", "SoftInt 01h", "SoftInt 02h", "SoftInt 03h", "SoftInt 04h", "SoftInt 05h", "SoftInt 06h", "SoftInt 07h",
1173 "SoftInt 08h", "SoftInt 09h", "SoftInt 0ah", "SoftInt 0bh", "SoftInt 0ch", "SoftInt 0dh", "SoftInt 0eh", "SoftInt 0fh",
1174 "SoftInt 10h", "SoftInt 11h", "SoftInt 12h", "SoftInt 13h", "SoftInt 14h", "SoftInt 15h", "SoftInt 16h", "SoftInt 17h",
1175 "SoftInt 18h", "SoftInt 19h", "SoftInt 1ah", "SoftInt 1bh", "SoftInt 1ch", "SoftInt 1dh", "SoftInt 1eh", "SoftInt 1fh",
1176 "SoftInt 20h", "SoftInt 21h", "SoftInt 22h", "SoftInt 23h", "SoftInt 24h", "SoftInt 25h", "SoftInt 26h", "SoftInt 27h",
1177 "SoftInt 28h", "SoftInt 29h", "SoftInt 2ah", "SoftInt 2bh", "SoftInt 2ch", "SoftInt 2dh", "SoftInt 2eh", "SoftInt 2fh",
1178 "SoftInt 30h", "SoftInt 31h", "SoftInt 32h", "SoftInt 33h", "SoftInt 34h", "SoftInt 35h", "SoftInt 36h", "SoftInt 37h",
1179 "SoftInt 38h", "SoftInt 39h", "SoftInt 3ah", "SoftInt 3bh", "SoftInt 3ch", "SoftInt 3dh", "SoftInt 3eh", "SoftInt 3fh",
1180 "SoftInt 40h", "SoftInt 41h", "SoftInt 42h", "SoftInt 43h", "SoftInt 44h", "SoftInt 45h", "SoftInt 46h", "SoftInt 47h",
1181 "SoftInt 48h", "SoftInt 49h", "SoftInt 4ah", "SoftInt 4bh", "SoftInt 4ch", "SoftInt 4dh", "SoftInt 4eh", "SoftInt 4fh",
1182 "SoftInt 50h", "SoftInt 51h", "SoftInt 52h", "SoftInt 53h", "SoftInt 54h", "SoftInt 55h", "SoftInt 56h", "SoftInt 57h",
1183 "SoftInt 58h", "SoftInt 59h", "SoftInt 5ah", "SoftInt 5bh", "SoftInt 5ch", "SoftInt 5dh", "SoftInt 5eh", "SoftInt 5fh",
1184 "SoftInt 60h", "SoftInt 61h", "SoftInt 62h", "SoftInt 63h", "SoftInt 64h", "SoftInt 65h", "SoftInt 66h", "SoftInt 67h",
1185 "SoftInt 68h", "SoftInt 69h", "SoftInt 6ah", "SoftInt 6bh", "SoftInt 6ch", "SoftInt 6dh", "SoftInt 6eh", "SoftInt 6fh",
1186 "SoftInt 70h", "SoftInt 71h", "SoftInt 72h", "SoftInt 73h", "SoftInt 74h", "SoftInt 75h", "SoftInt 76h", "SoftInt 77h",
1187 "SoftInt 78h", "SoftInt 79h", "SoftInt 7ah", "SoftInt 7bh", "SoftInt 7ch", "SoftInt 7dh", "SoftInt 7eh", "SoftInt 7fh",
1188 "SoftInt 80h", "SoftInt 81h", "SoftInt 82h", "SoftInt 83h", "SoftInt 84h", "SoftInt 85h", "SoftInt 86h", "SoftInt 87h",
1189 "SoftInt 88h", "SoftInt 89h", "SoftInt 8ah", "SoftInt 8bh", "SoftInt 8ch", "SoftInt 8dh", "SoftInt 8eh", "SoftInt 8fh",
1190 "SoftInt 90h", "SoftInt 91h", "SoftInt 92h", "SoftInt 93h", "SoftInt 94h", "SoftInt 95h", "SoftInt 96h", "SoftInt 97h",
1191 "SoftInt 98h", "SoftInt 99h", "SoftInt 9ah", "SoftInt 9bh", "SoftInt 9ch", "SoftInt 9dh", "SoftInt 9eh", "SoftInt 9fh",
1192 "SoftInt a0h", "SoftInt a1h", "SoftInt a2h", "SoftInt a3h", "SoftInt a4h", "SoftInt a5h", "SoftInt a6h", "SoftInt a7h",
1193 "SoftInt a8h", "SoftInt a9h", "SoftInt aah", "SoftInt abh", "SoftInt ach", "SoftInt adh", "SoftInt aeh", "SoftInt afh",
1194 "SoftInt b0h", "SoftInt b1h", "SoftInt b2h", "SoftInt b3h", "SoftInt b4h", "SoftInt b5h", "SoftInt b6h", "SoftInt b7h",
1195 "SoftInt b8h", "SoftInt b9h", "SoftInt bah", "SoftInt bbh", "SoftInt bch", "SoftInt bdh", "SoftInt beh", "SoftInt bfh",
1196 "SoftInt c0h", "SoftInt c1h", "SoftInt c2h", "SoftInt c3h", "SoftInt c4h", "SoftInt c5h", "SoftInt c6h", "SoftInt c7h",
1197 "SoftInt c8h", "SoftInt c9h", "SoftInt cah", "SoftInt cbh", "SoftInt cch", "SoftInt cdh", "SoftInt ceh", "SoftInt cfh",
1198 "SoftInt d0h", "SoftInt d1h", "SoftInt d2h", "SoftInt d3h", "SoftInt d4h", "SoftInt d5h", "SoftInt d6h", "SoftInt d7h",
1199 "SoftInt d8h", "SoftInt d9h", "SoftInt dah", "SoftInt dbh", "SoftInt dch", "SoftInt ddh", "SoftInt deh", "SoftInt dfh",
1200 "SoftInt e0h", "SoftInt e1h", "SoftInt e2h", "SoftInt e3h", "SoftInt e4h", "SoftInt e5h", "SoftInt e6h", "SoftInt e7h",
1201 "SoftInt e8h", "SoftInt e9h", "SoftInt eah", "SoftInt ebh", "SoftInt ech", "SoftInt edh", "SoftInt eeh", "SoftInt efh",
1202 "SoftInt f0h", "SoftInt f1h", "SoftInt f2h", "SoftInt f3h", "SoftInt f4h", "SoftInt f5h", "SoftInt f6h", "SoftInt f7h",
1203 "SoftInt f8h", "SoftInt f9h", "SoftInt fah", "SoftInt fbh", "SoftInt fch", "SoftInt fdh", "SoftInt feh", "SoftInt ffh",
1204 };
1205 if (uExit < RT_ELEMENTS(s_apszNames))
1206 return s_apszNames[uExit];
1207 return NULL;
1208}
1209
1210
1211/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1212static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
1213{
1214 if (*pfHeader)
1215 return;
1216 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
1217 *pfHeader = true;
1218}
1219
1220
1221#define IEMR3INFOTLB_F_ONLY_VALID RT_BIT_32(0)
1222#define IEMR3INFOTLB_F_CHECK RT_BIT_32(1)
1223
1224/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1225static void iemR3InfoTlbPrintSlot(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe,
1226 uint32_t uSlot, uint32_t fFlags)
1227{
1228#ifndef VBOX_VMM_TARGET_ARMV8
1229 uint64_t const uTlbRevision = !(uSlot & 1) ? pTlb->uTlbRevision : pTlb->uTlbRevisionGlobal;
1230#else
1231 uint64_t const uTlbRevision = pTlb->uTlbRevision;
1232#endif
1233 if ((fFlags & IEMR3INFOTLB_F_ONLY_VALID) && (pTlbe->uTag & IEMTLB_REVISION_MASK) != uTlbRevision)
1234 return;
1235
1236 /* The address needs to be sign extended, thus the shifting fun here.*/
1237 RTGCPTR const GCPtr = (RTGCINTPTR)((pTlbe->uTag & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1238 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT);
1239 const char *pszValid = "";
1240#ifndef VBOX_VMM_TARGET_ARMV8
1241 char szTmp[128];
1242 if (fFlags & IEMR3INFOTLB_F_CHECK)
1243 {
1244 uint32_t const fInvSlotG = (uint32_t)!(uSlot & 1) << X86_PTE_BIT_G;
1245 PGMPTWALKFAST WalkFast;
1246 int rc = PGMGstQueryPageFast(pVCpu, GCPtr, 0 /*fFlags - don't check or modify anything */, &WalkFast);
1247 pszValid = szTmp;
1248 if (RT_FAILURE(rc))
1249 switch (rc)
1250 {
1251 case VERR_PAGE_TABLE_NOT_PRESENT:
1252 switch ((WalkFast.fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT)
1253 {
1254 case 1: pszValid = " stale(page-not-present)"; break;
1255 case 2: pszValid = " stale(pd-entry-not-present)"; break;
1256 case 3: pszValid = " stale(pdptr-entry-not-present)"; break;
1257 case 4: pszValid = " stale(pml4-entry-not-present)"; break;
1258 case 5: pszValid = " stale(pml5-entry-not-present)"; break;
1259 default: pszValid = " stale(VERR_PAGE_TABLE_NOT_PRESENT)"; break;
1260 }
1261 break;
1262 default: RTStrPrintf(szTmp, sizeof(szTmp), " stale(rc=%d)", rc); break;
1263 }
1264 else if (WalkFast.GCPhys != pTlbe->GCPhys)
1265 RTStrPrintf(szTmp, sizeof(szTmp), " stale(GCPhys=%RGp)", WalkFast.GCPhys);
1266 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G | X86_PTE_A | X86_PTE_D))
1267 == ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1268 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED))
1269 | fInvSlotG ) )
1270 pszValid = " still-valid";
1271 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G))
1272 == ((pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER)) | fInvSlotG) )
1273 switch ( (~WalkFast.fEffective & (X86_PTE_A | X86_PTE_D))
1274 ^ (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED)) )
1275 {
1276 case X86_PTE_A:
1277 pszValid = WalkFast.fEffective & X86_PTE_A ? " still-valid(accessed-now)" : " still-valid(accessed-no-more)";
1278 break;
1279 case X86_PTE_D:
1280 pszValid = WalkFast.fEffective & X86_PTE_D ? " still-valid(dirty-now)" : " still-valid(dirty-no-more)";
1281 break;
1282 case X86_PTE_D | X86_PTE_A:
1283 RTStrPrintf(szTmp, sizeof(szTmp), " still-valid(%s%s)",
1284 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1285 : WalkFast.fEffective & X86_PTE_D ? "dirty-now" : "dirty-no-more",
1286 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1287 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1288 break;
1289 default: AssertFailed(); break;
1290 }
1291 else
1292 RTStrPrintf(szTmp, sizeof(szTmp), " stale(%s%s%s%s%s)",
1293 (~WalkFast.fEffective & X86_PTE_RW) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) ? ""
1294 : WalkFast.fEffective & X86_PTE_RW ? "writeable-now" : "writable-no-more",
1295 (~WalkFast.fEffective & X86_PTE_US) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) ? ""
1296 : WalkFast.fEffective & X86_PTE_US ? " user-now" : " user-no-more",
1297 (~WalkFast.fEffective & X86_PTE_G) == fInvSlotG ? ""
1298 : WalkFast.fEffective & X86_PTE_G ? " global-now" : " global-no-more",
1299 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1300 : WalkFast.fEffective & X86_PTE_D ? " dirty-now" : " dirty-no-more",
1301 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1302 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1303 }
1304#else
1305 RT_NOREF(pVCpu);
1306#endif
1307
1308 pHlp->pfnPrintf(pHlp, IEMTLB_SLOT_FMT ": %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s%s%s%s/%s%s%s%s/%s %s%s\n",
1309 uSlot,
1310 (pTlbe->uTag & IEMTLB_REVISION_MASK) == uTlbRevision ? "valid "
1311 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
1312 : "expired",
1313 GCPtr, /* -> */
1314 pTlbe->GCPhys, /* / */ pTlbe->pbMappingR3,
1315 /* / */
1316 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
1317 /* */
1318 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "R-" : "RW",
1319 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "-" : "X",
1320 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
1321 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
1322 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER ? "U" : "S",
1323 !(uSlot & 1) ? "-" : "G",
1324 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE ? "4K" : "2M",
1325 /* / */
1326 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
1327 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
1328 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "u" : "-",
1329 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_CODE_PAGE ? "c" : "-",
1330 /* / */
1331 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "N" : "M",
1332 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
1333 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired",
1334 pszValid);
1335}
1336
1337
1338/** Displays one or more TLB slots. */
1339static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1340 uint32_t uSlot, uint32_t cSlots, uint32_t fFlags, bool *pfHeader)
1341{
1342 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
1343 {
1344 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
1345 {
1346 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
1347 cSlots, RT_ELEMENTS(pTlb->aEntries));
1348 cSlots = RT_ELEMENTS(pTlb->aEntries);
1349 }
1350
1351 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1352 while (cSlots-- > 0)
1353 {
1354 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
1355 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &Tlbe, uSlot, fFlags);
1356 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
1357 }
1358 }
1359 else
1360 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
1361 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
1362}
1363
1364
/** Displays the TLB slot(s) for the given guest virtual address.
 *
 * On x86 both the even (non-global) slot and the following odd (global) slot
 * are shown, each with a match/expired/mismatch verdict against its
 * respective TLB revision.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pHlp        The info helper callbacks (for printing).
 * @param   pTlb        The TLB to look the address up in.
 * @param   uAddress    The guest virtual address.
 * @param   fFlags      IEMR3INFOTLB_F_XXX flags forwarded to the slot printer.
 * @param   pfHeader    Header-printed indicator (in/out).
 */
static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
                                     uint64_t uAddress, uint32_t fFlags, bool *pfHeader)
{
    iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);

    uint64_t const uTag  = IEMTLB_CALC_TAG_NO_REV(uAddress);
#ifdef IEMTLB_TAG_TO_EVEN_INDEX
    uint32_t const uSlot = IEMTLB_TAG_TO_EVEN_INDEX(uTag);
#else
    uint32_t const uSlot = IEMTLB_TAG_TO_INDEX(uTag);
#endif
    /* Copy the entries so they cannot change while being examined/printed. */
    IEMTLBENTRY const TlbeL = pTlb->aEntries[uSlot];
#ifndef VBOX_VMM_TARGET_ARMV8
    IEMTLBENTRY const TlbeG = pTlb->aEntries[uSlot + 1]; /* odd slot = global pages */
#endif
    pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
                    TlbeL.uTag == (uTag | pTlb->uTlbRevision) ? "match"
                    : (TlbeL.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
    iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeL, uSlot, fFlags);

#ifndef VBOX_VMM_TARGET_ARMV8
    pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot + 1,
                    TlbeG.uTag == (uTag | pTlb->uTlbRevisionGlobal) ? "match"
                    : (TlbeG.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
    iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeG, uSlot + 1, fFlags);
#endif
}
1393
1394
1395/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
1396static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
1397{
1398 /*
1399 * This is entirely argument driven.
1400 */
1401 static RTGETOPTDEF const s_aOptions[] =
1402 {
1403 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1404 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1405 { "--check", 'C', RTGETOPT_REQ_NOTHING },
1406 { "all", 'A', RTGETOPT_REQ_NOTHING },
1407 { "--all", 'A', RTGETOPT_REQ_NOTHING },
1408 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1409 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
1410 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1411 { "--only-valid", 'v', RTGETOPT_REQ_NOTHING },
1412 };
1413
1414 RTGETOPTSTATE State;
1415 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1416 AssertRCReturnVoid(rc);
1417
1418 uint32_t cActionArgs = 0;
1419 bool fNeedHeader = true;
1420 bool fAddressMode = true;
1421 uint32_t fFlags = 0;
1422 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1423 PVMCPU pVCpu = pVCpuCall;
1424 if (!pVCpu)
1425 pVCpu = VMMGetCpuById(pVM, 0);
1426
1427 RTGETOPTUNION ValueUnion;
1428 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1429 {
1430 switch (rc)
1431 {
1432 case 'c':
1433 if (ValueUnion.u32 >= pVM->cCpus)
1434 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1435 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1436 {
1437 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1438 fNeedHeader = true;
1439 if (!pVCpuCall || pVCpuCall->idCpu != ValueUnion.u32)
1440 {
1441 pHlp->pfnPrintf(pHlp, "info: Can't check guest PTs when switching to a different VCpu! Targetting %u, on %u.\n",
1442 ValueUnion.u32, pVCpuCall->idCpu);
1443 fFlags &= ~IEMR3INFOTLB_F_CHECK;
1444 }
1445 }
1446 break;
1447
1448 case 'C':
1449 if (!pVCpuCall)
1450 pHlp->pfnPrintf(pHlp, "error: Can't check guest PT when not running on an EMT!\n");
1451 else if (pVCpu != pVCpuCall)
1452 pHlp->pfnPrintf(pHlp, "error: Can't check guest PTs when on a different EMT! Targetting %u, on %u.\n",
1453 pVCpu->idCpu, pVCpuCall->idCpu);
1454 else
1455 fFlags |= IEMR3INFOTLB_F_CHECK;
1456 break;
1457
1458 case 'a':
1459 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1460 ValueUnion.u64, fFlags, &fNeedHeader);
1461 fAddressMode = true;
1462 cActionArgs++;
1463 break;
1464
1465 case 'A':
1466 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1467 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1468 cActionArgs++;
1469 break;
1470
1471 case 'r':
1472 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1473 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, fFlags, &fNeedHeader);
1474 fAddressMode = false;
1475 cActionArgs++;
1476 break;
1477
1478 case 's':
1479 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1480 ValueUnion.u32, 1, fFlags, &fNeedHeader);
1481 fAddressMode = false;
1482 cActionArgs++;
1483 break;
1484
1485 case 'v':
1486 fFlags |= IEMR3INFOTLB_F_ONLY_VALID;
1487 break;
1488
1489 case VINF_GETOPT_NOT_OPTION:
1490 if (fAddressMode)
1491 {
1492 uint64_t uAddr;
1493 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
1494 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1495 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1496 uAddr, fFlags, &fNeedHeader);
1497 else
1498 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
1499 }
1500 else
1501 {
1502 uint32_t uSlot;
1503 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
1504 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1505 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1506 uSlot, 1, fFlags, &fNeedHeader);
1507 else
1508 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
1509 }
1510 cActionArgs++;
1511 break;
1512
1513 case 'h':
1514 pHlp->pfnPrintf(pHlp,
1515 "Usage: info %ctlb [options]\n"
1516 "\n"
1517 "Options:\n"
1518 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1519 " Selects the CPU which TLBs we're looking at. Default: Caller / 0\n"
1520 " -C,--check\n"
1521 " Check valid entries against guest PTs.\n"
1522 " -A, --all, all\n"
1523 " Display all the TLB entries (default if no other args).\n"
1524 " -a<virt>, --address=<virt>\n"
1525 " Shows the TLB entry for the specified guest virtual address.\n"
1526 " -r<slot:count>, --range=<slot:count>\n"
1527 " Shows the TLB entries for the specified slot range.\n"
1528 " -s<slot>,--slot=<slot>\n"
1529 " Shows the given TLB slot.\n"
1530 " -v,--only-valid\n"
1531 " Only show valid TLB entries (TAG, not phys)\n"
1532 "\n"
1533 "Non-options are interpreted according to the last -a, -r or -s option,\n"
1534 "defaulting to addresses if not preceeded by any of those options.\n"
1535 , fITlb ? 'i' : 'd');
1536 return;
1537
1538 default:
1539 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1540 return;
1541 }
1542 }
1543
1544 /*
1545 * If no action taken, we display all (-A) by default.
1546 */
1547 if (!cActionArgs)
1548 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1549 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1550}
1551
1552
1553/**
1554 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
1555 */
1556static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1557{
1558 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
1559}
1560
1561
1562/**
1563 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
1564 */
1565static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1566{
1567 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
1568}
1569
1570
1571#ifdef IEM_WITH_TLB_TRACE
1572/**
1573 * @callback_method_impl{FNDBGFINFOARGVINT, tlbtrace}
1574 */
1575static DECLCALLBACK(void) iemR3InfoTlbTrace(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1576{
1577 /*
1578 * Parse arguments.
1579 */
1580 static RTGETOPTDEF const s_aOptions[] =
1581 {
1582 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1583 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1584 { "--last", 'l', RTGETOPT_REQ_UINT32 },
1585 { "--limit", 'l', RTGETOPT_REQ_UINT32 },
1586 { "--stop-at-global-flush", 'g', RTGETOPT_REQ_NOTHING },
1587 { "--resolve-rip", 'r', RTGETOPT_REQ_NOTHING },
1588 };
1589
1590 RTGETOPTSTATE State;
1591 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1592 AssertRCReturnVoid(rc);
1593
1594 uint32_t cLimit = UINT32_MAX;
1595 bool fStopAtGlobalFlush = false;
1596 bool fResolveRip = false;
1597 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1598 PVMCPU pVCpu = pVCpuCall;
1599 if (!pVCpu)
1600 pVCpu = VMMGetCpuById(pVM, 0);
1601
1602 RTGETOPTUNION ValueUnion;
1603 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1604 {
1605 switch (rc)
1606 {
1607 case 'c':
1608 if (ValueUnion.u32 >= pVM->cCpus)
1609 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1610 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1611 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1612 break;
1613
1614 case 'l':
1615 cLimit = ValueUnion.u32;
1616 break;
1617
1618 case 'g':
1619 fStopAtGlobalFlush = true;
1620 break;
1621
1622 case 'r':
1623 fResolveRip = true;
1624 break;
1625
1626 case 'h':
1627 pHlp->pfnPrintf(pHlp,
1628 "Usage: info tlbtrace [options] [n]\n"
1629 "\n"
1630 "Options:\n"
1631 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1632 " Selects the CPU which TLB trace we're looking at. Default: Caller / 0\n"
1633 " [n], -l<n>, --last=<n>\n"
1634 " Limit display to the last N entries. Default: all\n"
1635 " -g, --stop-at-global-flush\n"
1636 " Stop after the first global flush entry.\n"
1637 " -r, --resolve-rip\n"
1638 " Resolve symbols for the flattened RIP addresses.\n"
1639 );
1640 return;
1641
1642 case VINF_GETOPT_NOT_OPTION:
1643 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cLimit);
1644 if (RT_SUCCESS(rc))
1645 break;
1646 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
1647 return;
1648
1649 default:
1650 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1651 return;
1652 }
1653 }
1654
1655 /*
1656 * Get the details.
1657 */
1658 AssertReturnVoid(pVCpu);
1659 Assert(pVCpu->iem.s.cTlbTraceEntriesShift <= 28);
1660 uint32_t idx = pVCpu->iem.s.idxTlbTraceEntry;
1661 uint32_t const cShift = RT_MIN(pVCpu->iem.s.cTlbTraceEntriesShift, 28);
1662 uint32_t const fMask = RT_BIT_32(cShift) - 1;
1663 uint32_t cLeft = RT_MIN(RT_MIN(idx, RT_BIT_32(cShift)), cLimit);
1664 PCIEMTLBTRACEENTRY paEntries = pVCpu->iem.s.paTlbTraceEntries;
1665 if (cLeft && paEntries)
1666 {
1667 /*
1668 * Display the entries.
1669 */
1670 pHlp->pfnPrintf(pHlp, "TLB Trace for CPU %u:\n", pVCpu->idCpu);
1671 while (cLeft-- > 0)
1672 {
1673 PCIEMTLBTRACEENTRY const pCur = &paEntries[--idx & fMask];
1674 const char *pszSymbol = "";
1675 union
1676 {
1677 RTDBGSYMBOL Symbol;
1678 char ach[sizeof(RTDBGSYMBOL) + 32];
1679 } uBuf;
1680 if (fResolveRip)
1681 {
1682 RTGCINTPTR offDisp = 0;
1683 DBGFADDRESS Addr;
1684 rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, pCur->rip),
1685 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL
1686 | RTDBGSYMADDR_FLAGS_SKIP_ABS
1687 | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1688 &offDisp, &uBuf.Symbol, NULL);
1689 if (RT_SUCCESS(rc))
1690 {
1691 /* Add displacement. */
1692 if (offDisp)
1693 {
1694 size_t const cchName = strlen(uBuf.Symbol.szName);
1695 char * const pszEndName = &uBuf.Symbol.szName[cchName];
1696 size_t const cbLeft = sizeof(uBuf) - sizeof(uBuf.Symbol) + sizeof(uBuf.Symbol.szName) - cchName;
1697 if (offDisp > 0)
1698 RTStrPrintf(pszEndName, cbLeft, "+%#1RGv", offDisp);
1699 else
1700 RTStrPrintf(pszEndName, cbLeft, "-%#1RGv", -offDisp);
1701 }
1702
1703 /* Put a space before it. */
1704 AssertCompile(RTASSERT_OFFSET_OF(RTDBGSYMBOL, szName) > 0);
1705 char *pszName = uBuf.Symbol.szName;
1706 *--pszName = ' ';
1707 pszSymbol = pszName;
1708 }
1709 }
1710 static const char *s_apszTlbType[2] = { "code", "data" };
1711 static const char *s_apszScanType[4] = { "skipped", "global", "non-global", "both" };
1712 switch (pCur->enmType)
1713 {
1714 case kIemTlbTraceType_InvlPg:
1715 pHlp->pfnPrintf(pHlp, "%u: %016RX64 invlpg %RGv slot=" IEMTLB_SLOT_FMT "%s\n", idx, pCur->rip,
1716 pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param), pszSymbol);
1717 break;
1718 case kIemTlbTraceType_EvictSlot:
1719 pHlp->pfnPrintf(pHlp, "%u: %016RX64 evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1720 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1721 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1722 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1723 pCur->u64Param2, pszSymbol);
1724 break;
1725 case kIemTlbTraceType_LargeEvictSlot:
1726 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1727 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1728 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1729 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1730 pCur->u64Param2, pszSymbol);
1731 break;
1732 case kIemTlbTraceType_LargeScan:
1733 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large scan %s %s%s\n", idx, pCur->rip, s_apszTlbType[pCur->bParam & 1],
1734 s_apszScanType[pCur->u32Param & 3], pszSymbol);
1735 break;
1736
1737 case kIemTlbTraceType_Flush:
1738 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64%s\n", idx, pCur->rip,
1739 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pszSymbol);
1740 break;
1741 case kIemTlbTraceType_FlushGlobal:
1742 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64 grev=%#RX64%s\n", idx, pCur->rip,
1743 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pCur->u64Param2, pszSymbol);
1744 if (fStopAtGlobalFlush)
1745 return;
1746 break;
1747 case kIemTlbTraceType_Load:
1748 case kIemTlbTraceType_LoadGlobal:
1749 pHlp->pfnPrintf(pHlp, "%u: %016RX64 %cload %s %RGv slot=" IEMTLB_SLOT_FMT " gcphys=%RGp fTlb=%#RX32%s\n",
1750 idx, pCur->rip,
1751 pCur->enmType == kIemTlbTraceType_LoadGlobal ? 'g' : 'l', s_apszTlbType[pCur->bParam & 1],
1752 pCur->u64Param,
1753 (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param)
1754 | (pCur->enmType == kIemTlbTraceType_LoadGlobal),
1755 (RTGCPTR)pCur->u64Param2, pCur->u32Param, pszSymbol);
1756 break;
1757
1758 case kIemTlbTraceType_Load_Cr0:
1759 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr0 %08RX64 (was %08RX64)%s\n",
1760 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1761 break;
1762 case kIemTlbTraceType_Load_Cr3:
1763 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr3 %016RX64 (was %016RX64)%s\n",
1764 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1765 break;
1766 case kIemTlbTraceType_Load_Cr4:
1767 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr4 %08RX64 (was %08RX64)%s\n",
1768 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1769 break;
1770 case kIemTlbTraceType_Load_Efer:
1771 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load efer %016RX64 (was %016RX64)%s\n",
1772 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1773 break;
1774
1775 case kIemTlbTraceType_Irq:
1776 pHlp->pfnPrintf(pHlp, "%u: %016RX64 irq %#04x flags=%#x eflboth=%#RX64%s\n",
1777 idx, pCur->rip, pCur->bParam, pCur->u32Param,
1778 pCur->u64Param & ((RT_BIT_64(CPUMX86EFLAGS_HW_BITS) - 1) | CPUMX86EFLAGS_INT_MASK_64),
1779 pszSymbol);
1780 break;
1781 case kIemTlbTraceType_Xcpt:
1782 if (pCur->u32Param & IEM_XCPT_FLAGS_CR2)
1783 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x cr2=%RX64%s\n",
1784 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pCur->u64Param2, pszSymbol);
1785 else if (pCur->u32Param & IEM_XCPT_FLAGS_ERR)
1786 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x%s\n",
1787 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pszSymbol);
1788 else
1789 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x%s\n",
1790 idx, pCur->rip, pCur->bParam, pCur->u32Param, pszSymbol);
1791 break;
1792 case kIemTlbTraceType_IRet:
1793 pHlp->pfnPrintf(pHlp, "%u: %016RX64 iret cs:rip=%04x:%016RX64 efl=%08RX32%s\n",
1794 idx, pCur->rip, pCur->u32Param, pCur->u64Param, (uint32_t)pCur->u64Param2, pszSymbol);
1795 break;
1796
1797 case kIemTlbTraceType_Tb_Compile:
1798 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb comp GCPhysPc=%012RX64%s\n",
1799 idx, pCur->rip, pCur->u64Param, pszSymbol);
1800 break;
1801 case kIemTlbTraceType_Tb_Exec_Threaded:
1802 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb thrd GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1803 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1804 break;
1805 case kIemTlbTraceType_Tb_Exec_Native:
1806 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb n8ve GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1807 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1808 break;
1809
1810 case kIemTlbTraceType_User0:
1811 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user0 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1812 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1813 break;
1814 case kIemTlbTraceType_User1:
1815 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user1 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1816 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1817 break;
1818 case kIemTlbTraceType_User2:
1819 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user2 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1820 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1821 break;
1822 case kIemTlbTraceType_User3:
1823 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user3 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1824 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1825 break;
1826
1827 case kIemTlbTraceType_Invalid:
1828 pHlp->pfnPrintf(pHlp, "%u: Invalid!\n");
1829 break;
1830 }
1831 }
1832 }
1833 else
1834 pHlp->pfnPrintf(pHlp, "No trace entries to display\n");
1835}
1836#endif /* IEM_WITH_TLB_TRACE */
1837
1838#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1839
1840/**
1841 * Get get compile time flat PC for the TB.
1842 */
1843DECL_FORCE_INLINE(RTGCPTR) iemR3GetTbFlatPc(PCIEMTB pTb)
1844{
1845#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
1846 if (pTb->fFlags & IEMTB_F_TYPE_NATIVE)
1847 {
1848 PCIEMTBDBG const pDbgInfo = pTb->pDbgInfo;
1849 return pDbgInfo ? pDbgInfo->FlatPc : RTGCPTR_MAX;
1850 }
1851#endif
1852 return pTb->FlatPc;
1853}
1854
1855
1856/**
1857 * @callback_method_impl{FNDBGFINFOARGVINT, tb}
1858 */
1859static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1860{
1861 /*
1862 * Parse arguments.
1863 */
1864 static RTGETOPTDEF const s_aOptions[] =
1865 {
1866 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1867 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1868 { "--addr", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1869 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1870 { "--phys", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1871 { "--physical", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1872 { "--phys-addr", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1873 { "--phys-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1874 { "--physical-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1875 { "--flags", 'f', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1876 { "--tb", 't', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1877 { "--tb-id", 't', RTGETOPT_REQ_UINT32 },
1878 };
1879
1880 RTGETOPTSTATE State;
1881 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1882 AssertRCReturnVoid(rc);
1883
1884 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
1885 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
1886 RTGCPHYS GCPhysPc = NIL_RTGCPHYS;
1887 RTGCPHYS GCVirt = NIL_RTGCPTR;
1888 uint32_t fFlags = UINT32_MAX;
1889 uint32_t idTb = UINT32_MAX;
1890
1891 RTGETOPTUNION ValueUnion;
1892 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1893 {
1894 switch (rc)
1895 {
1896 case 'c':
1897 if (ValueUnion.u32 >= pVM->cCpus)
1898 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1899 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1900 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1901 break;
1902
1903 case 'a':
1904 GCVirt = ValueUnion.u64;
1905 GCPhysPc = NIL_RTGCPHYS;
1906 idTb = UINT32_MAX;
1907 break;
1908
1909 case 'p':
1910 GCVirt = NIL_RTGCPHYS;
1911 GCPhysPc = ValueUnion.u64;
1912 idTb = UINT32_MAX;
1913 break;
1914
1915 case 'f':
1916 fFlags = ValueUnion.u32;
1917 break;
1918
1919 case 't':
1920 GCVirt = NIL_RTGCPHYS;
1921 GCPhysPc = NIL_RTGCPHYS;
1922 idTb = ValueUnion.u32;
1923 break;
1924
1925 case VINF_GETOPT_NOT_OPTION:
1926 {
1927 if ( (ValueUnion.psz[0] == 'T' || ValueUnion.psz[0] == 't')
1928 && (ValueUnion.psz[1] == 'B' || ValueUnion.psz[1] == 'b')
1929 && ValueUnion.psz[2] == '#')
1930 {
1931 rc = RTStrToUInt32Full(&ValueUnion.psz[3], 0, &idTb);
1932 if (RT_SUCCESS(rc))
1933 {
1934 GCVirt = NIL_RTGCPHYS;
1935 GCPhysPc = NIL_RTGCPHYS;
1936 break;
1937 }
1938 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to TD ID: %Rrc\n", ValueUnion.psz, rc);
1939 }
1940 else
1941 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1942 return;
1943 }
1944
1945 case 'h':
1946 pHlp->pfnPrintf(pHlp,
1947 "Usage: info tb [options]\n"
1948 "\n"
1949 "Options:\n"
1950 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1951 " Selects the CPU which TBs we're looking at. Default: Caller / 0\n"
1952 " -a<virt>, --address=<virt>\n"
1953 " Shows the TB for the specified guest virtual address.\n"
1954 " -p<phys>, --phys=<phys>, --phys-addr=<phys>\n"
1955 " Shows the TB for the specified guest physical address.\n"
1956 " -t<id>, --tb=<id>, --tb-id=<id>, TD#<id>\n"
1957 " Show the TB specified by the identifier/number (from tbtop).\n"
1958 " -f<flags>,--flags=<flags>\n"
1959 " The TB flags value (hex) to use when looking up the TB.\n"
1960 "\n"
1961 "The default is to use CS:RIP and derive flags from the CPU mode.\n");
1962 return;
1963
1964 default:
1965 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1966 return;
1967 }
1968 }
1969
1970 /* Currently, only do work on the same EMT. */
1971 if (pVCpu != pVCpuThis)
1972 {
1973 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
1974 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
1975 return;
1976 }
1977
1978 /*
1979 * Defaults.
1980 */
1981 if (GCPhysPc == NIL_RTGCPHYS && idTb == UINT32_MAX)
1982 {
1983 if (GCVirt == NIL_RTGCPTR)
1984 GCVirt = CPUMGetGuestFlatPC(pVCpu);
1985 rc = PGMPhysGCPtr2GCPhys(pVCpu, GCVirt, &GCPhysPc);
1986 if (RT_FAILURE(rc))
1987 {
1988 pHlp->pfnPrintf(pHlp, "Failed to convert %%%RGv to an guest physical address: %Rrc\n", GCVirt, rc);
1989 return;
1990 }
1991 }
1992 if (fFlags == UINT32_MAX && idTb == UINT32_MAX)
1993 {
1994 /* Note! This is duplicating code in IEMAllThrdRecompiler. */
1995 fFlags = iemCalcExecFlags(pVCpu);
1996 if (pVM->cCpus == 1)
1997 fFlags |= IEM_F_X86_DISREGARD_LOCK;
1998 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
1999 fFlags |= IEMTB_F_INHIBIT_SHADOW;
2000 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
2001 fFlags |= IEMTB_F_INHIBIT_NMI;
2002 if ((IEM_F_MODE_CPUMODE_MASK & fFlags) != IEMMODE_64BIT)
2003 {
2004 int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
2005 if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
2006 fFlags |= IEMTB_F_CS_LIM_CHECKS;
2007 }
2008 }
2009
2010 PCIEMTB pTb;
2011 if (idTb == UINT32_MAX)
2012 {
2013 /*
2014 * Do the lookup...
2015 *
2016 * Note! This is also duplicating code in IEMAllThrdRecompiler. We don't
2017 * have much choice since we don't want to increase use counters and
2018 * trigger native recompilation.
2019 */
2020 fFlags &= IEMTB_F_KEY_MASK;
2021 IEMTBCACHE const * const pTbCache = pVCpu->iem.s.pTbCacheR3;
2022 uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
2023 pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
2024 while (pTb)
2025 {
2026 if (pTb->GCPhysPc == GCPhysPc)
2027 {
2028 if ((pTb->fFlags & IEMTB_F_KEY_MASK) == fFlags)
2029 {
2030 /// @todo if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
2031 break;
2032 }
2033 }
2034 pTb = pTb->pNext;
2035 }
2036 if (!pTb)
2037 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x - no TB found on #%u\n", GCPhysPc, fFlags, pVCpu->idCpu);
2038 }
2039 else
2040 {
2041 /*
2042 * Use the TB ID for indexing.
2043 */
2044 pTb = NULL;
2045 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2046 if (pTbAllocator)
2047 {
2048 size_t const idxTbChunk = idTb / pTbAllocator->cTbsPerChunk;
2049 size_t const idxTbInChunk = idTb % pTbAllocator->cTbsPerChunk;
2050 if (idxTbChunk < pTbAllocator->cAllocatedChunks)
2051 pTb = &pTbAllocator->aChunks[idxTbChunk].paTbs[idxTbInChunk];
2052 else
2053 pHlp->pfnPrintf(pHlp, "Invalid TB ID: %u (%#x)\n", idTb, idTb);
2054 }
2055 }
2056
2057 if (pTb)
2058 {
2059 /*
2060 * Disassemble according to type.
2061 */
2062 size_t const idxTbChunk = pTb->idxAllocChunk;
2063 size_t const idxTbNo = (pTb - &pVCpu->iem.s.pTbAllocatorR3->aChunks[idxTbChunk].paTbs[0])
2064 + idxTbChunk * pVCpu->iem.s.pTbAllocatorR3->cTbsPerChunk;
2065 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2066 {
2067# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2068 case IEMTB_F_TYPE_NATIVE:
2069 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - native\n",
2070 GCPhysPc, iemR3GetTbFlatPc(pTb), fFlags, pVCpu->idCpu, idxTbNo, pTb);
2071 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2072 break;
2073# endif
2074
2075 case IEMTB_F_TYPE_THREADED:
2076 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - threaded\n",
2077 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb);
2078 iemThreadedDisassembleTb(pTb, pHlp);
2079 break;
2080
2081 default:
2082 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - ??? %#x\n",
2083 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb, pTb->fFlags);
2084 break;
2085 }
2086 }
2087}
2088
2089
2090/**
2091 * @callback_method_impl{FNDBGFINFOARGVINT, tbtop}
2092 */
2093static DECLCALLBACK(void) iemR3InfoTbTop(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
2094{
2095 /*
2096 * Parse arguments.
2097 */
2098 static RTGETOPTDEF const s_aOptions[] =
2099 {
2100 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
2101 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
2102 { "--dis", 'd', RTGETOPT_REQ_NOTHING },
2103 { "--disas", 'd', RTGETOPT_REQ_NOTHING },
2104 { "--disasm", 'd', RTGETOPT_REQ_NOTHING },
2105 { "--disassemble", 'd', RTGETOPT_REQ_NOTHING },
2106 { "--no-dis", 'D', RTGETOPT_REQ_NOTHING },
2107 { "--no-disas", 'D', RTGETOPT_REQ_NOTHING },
2108 { "--no-disasm", 'D', RTGETOPT_REQ_NOTHING },
2109 { "--no-disassemble", 'D', RTGETOPT_REQ_NOTHING },
2110 { "--most-freq", 'f', RTGETOPT_REQ_NOTHING },
2111 { "--most-frequent", 'f', RTGETOPT_REQ_NOTHING },
2112 { "--most-frequently", 'f', RTGETOPT_REQ_NOTHING },
2113 { "--most-frequently-used", 'f', RTGETOPT_REQ_NOTHING },
2114 { "--most-recent", 'r', RTGETOPT_REQ_NOTHING },
2115 { "--most-recently", 'r', RTGETOPT_REQ_NOTHING },
2116 { "--most-recently-used", 'r', RTGETOPT_REQ_NOTHING },
2117 { "--count", 'n', RTGETOPT_REQ_UINT32 },
2118 };
2119
2120 RTGETOPTSTATE State;
2121 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
2122 AssertRCReturnVoid(rc);
2123
2124 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
2125 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
2126 enum { kTbTop_MostFrequentlyUsed, kTbTop_MostRececentlyUsed }
2127 enmTop = kTbTop_MostFrequentlyUsed;
2128 bool fDisassemble = false;
2129 uint32_t const cTopDefault = 64;
2130 uint32_t const cTopMin = 1;
2131 uint32_t const cTopMax = 1024;
2132 uint32_t cTop = cTopDefault;
2133
2134 RTGETOPTUNION ValueUnion;
2135 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
2136 {
2137 switch (rc)
2138 {
2139 case 'c':
2140 if (ValueUnion.u32 >= pVM->cCpus)
2141 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
2142 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
2143 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
2144 break;
2145
2146 case 'd':
2147 fDisassemble = true;
2148 break;
2149
2150 case 'D':
2151 fDisassemble = true;
2152 break;
2153
2154 case 'f':
2155 enmTop = kTbTop_MostFrequentlyUsed;
2156 break;
2157
2158 case 'r':
2159 enmTop = kTbTop_MostRececentlyUsed;
2160 break;
2161
2162 case VINF_GETOPT_NOT_OPTION:
2163 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cTop);
2164 if (RT_FAILURE(rc))
2165 {
2166 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
2167 return;
2168 }
2169 ValueUnion.u32 = cTop;
2170 RT_FALL_THROUGH();
2171 case 'n':
2172 if (!ValueUnion.u32)
2173 cTop = cTopDefault;
2174 else
2175 {
2176 cTop = RT_MAX(RT_MIN(ValueUnion.u32, cTopMax), cTopMin);
2177 if (cTop != ValueUnion.u32)
2178 pHlp->pfnPrintf(pHlp, "warning: adjusted %u to %u (valid range: [%u..%u], 0 for default (%d))",
2179 ValueUnion.u32, cTop, cTopMin, cTopMax, cTopDefault);
2180 }
2181 break;
2182
2183 case 'h':
2184 pHlp->pfnPrintf(pHlp,
2185 "Usage: info tbtop [options]\n"
2186 "\n"
2187 "Options:\n"
2188 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
2189 " Selects the CPU which TBs we're looking at. Default: Caller / 0\n"
2190 " -d, --dis[as[m]], --disassemble\n"
2191 " Show full TB disassembly.\n"
2192 " -D, --no-dis[as[m]], --no-disassemble\n"
2193 " Do not show TB diassembly. The default.\n"
2194 " -f, --most-freq[ent[ly[-used]]]\n"
2195 " Shows the most frequently used TBs (IEMTB::cUsed). The default.\n"
2196 " -r, --most-recent[ly[-used]]\n"
2197 " Shows the most recently used TBs (IEMTB::msLastUsed).\n"
2198 " -n<num>, --count=<num>\n"
2199 " The number of TBs to display. Default: %u\n"
2200 " This is also what non-option arguments will be taken as.\n"
2201 , cTopDefault);
2202 return;
2203
2204 default:
2205 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
2206 return;
2207 }
2208 }
2209
2210 /* Currently, only do work on the same EMT. */
2211 if (pVCpu != pVCpuThis)
2212 {
2213 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
2214 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
2215 return;
2216 }
2217
2218 /*
2219 * Collect the data by scanning the TB allocation map.
2220 */
2221 struct IEMTBTOPENTRY
2222 {
2223 /** Pointer to the translation block. */
2224 PCIEMTB pTb;
2225 /** The sorting key. */
2226 uint64_t uSortKey;
2227 } aTop[cTopMax] = { { NULL, 0 }, };
2228 uint32_t cValid = 0;
2229 PIEMTBALLOCATOR pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2230 if (pTbAllocator)
2231 {
2232 uint32_t const cTbsPerChunk = pTbAllocator->cTbsPerChunk;
2233 for (uint32_t iChunk = 0; iChunk < pTbAllocator->cAllocatedChunks; iChunk++)
2234 {
2235 for (uint32_t iTb = 0; iTb < cTbsPerChunk; iTb++)
2236 {
2237 PCIEMTB const pTb = &pTbAllocator->aChunks[iChunk].paTbs[iTb];
2238 AssertContinue(pTb);
2239 if (pTb->fFlags & IEMTB_F_TYPE_MASK)
2240 {
2241 /* Extract and compose the sort key. */
2242 uint64_t const uSortKey = enmTop == kTbTop_MostFrequentlyUsed
2243 ? RT_MAKE_U64(pTb->msLastUsed, pTb->cUsed)
2244 : RT_MAKE_U64(pTb->cUsed, pTb->msLastUsed);
2245
2246 /*
2247 * Discard the key if it's smaller than the smallest in the table when it is full.
2248 */
2249 if ( cValid >= cTop
2250 && uSortKey <= aTop[cTop - 1].uSortKey)
2251 { /* discard it */ }
2252 else
2253 {
2254 /*
2255 * Do binary search to find the insert location
2256 */
2257 uint32_t idx;
2258 if (cValid > 0)
2259 {
2260 uint32_t idxEnd = cValid;
2261 uint32_t idxStart = 0;
2262 idx = cValid / 2;
2263 for (;;)
2264 {
2265 if (uSortKey > aTop[idx].uSortKey)
2266 {
2267 if (idx > idxStart)
2268 idxEnd = idx;
2269 else
2270 break;
2271 }
2272 else if (uSortKey < aTop[idx].uSortKey)
2273 {
2274 idx += 1;
2275 if (idx < idxEnd)
2276 idxStart = idx;
2277 else
2278 break;
2279 }
2280 else
2281 {
2282 do
2283 idx++;
2284 while (idx < cValid && uSortKey == aTop[idx].uSortKey);
2285 break;
2286 }
2287 idx = idxStart + (idxEnd - idxStart) / 2;
2288 }
2289 AssertContinue(idx < RT_ELEMENTS(aTop));
2290
2291 /*
2292 * Shift entries as needed.
2293 */
2294 if (cValid >= cTop)
2295 {
2296 if (idx != cTop - 1U)
2297 memmove(&aTop[idx + 1], &aTop[idx], (cTop - idx - 1) * sizeof(aTop[0]));
2298 }
2299 else
2300 {
2301 if (idx != cValid)
2302 memmove(&aTop[idx + 1], &aTop[idx], (cValid - idx) * sizeof(aTop[0]));
2303 cValid++;
2304 }
2305 }
2306 else
2307 {
2308 /* Special case: The first insertion. */
2309 cValid = 1;
2310 idx = 0;
2311 }
2312
2313 /*
2314 * Fill in the new entry.
2315 */
2316 aTop[idx].uSortKey = uSortKey;
2317 aTop[idx].pTb = pTb;
2318 }
2319 }
2320 }
2321 }
2322 }
2323
2324 /*
2325 * Display the result.
2326 */
2327 if (cTop > cValid)
2328 cTop = cValid;
2329 pHlp->pfnPrintf(pHlp, "Displaying the top %u TBs for CPU #%u ordered by %s:\n",
2330 cTop, pVCpu->idCpu, enmTop == kTbTop_MostFrequentlyUsed ? "cUsed" : "msLastUsed");
2331 if (fDisassemble)
2332 pHlp->pfnPrintf(pHlp, "================================================================================\n");
2333
2334 for (uint32_t idx = 0; idx < cTop; idx++)
2335 {
2336 if (fDisassemble && idx)
2337 pHlp->pfnPrintf(pHlp, "\n------------------------------- %u -------------------------------\n", idx);
2338
2339 PCIEMTB const pTb = aTop[idx].pTb;
2340 size_t const idxTbChunk = pTb->idxAllocChunk;
2341 Assert(idxTbChunk < pTbAllocator->cAllocatedChunks);
2342 size_t const idxTbNo = (pTb - &pTbAllocator->aChunks[idxTbChunk].paTbs[0])
2343 + idxTbChunk * pTbAllocator->cTbsPerChunk;
2344 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2345 {
2346# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2347 case IEMTB_F_TYPE_NATIVE:
2348 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - native\n",
2349 idxTbNo, pTb->GCPhysPc, iemR3GetTbFlatPc(pTb), pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2350 if (fDisassemble)
2351 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2352 break;
2353# endif
2354
2355 case IEMTB_F_TYPE_THREADED:
2356 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - threaded\n",
2357 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2358 if (fDisassemble)
2359 iemThreadedDisassembleTb(pTb, pHlp);
2360 break;
2361
2362 default:
2363 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - ???\n",
2364 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2365 break;
2366 }
2367 }
2368}
2369
2370#endif /* VBOX_WITH_IEM_RECOMPILER && !VBOX_VMM_TARGET_ARMV8 */
2371
2372
2373#ifdef VBOX_WITH_DEBUGGER
2374
2375/** @callback_method_impl{FNDBGCCMD,
2376 * Implements the '.alliem' command. }
2377 */
2378static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2379{
2380 VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
2381 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
2382 if (pVCpu)
2383 {
2384 VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAllGlobal, 1, pVCpu);
2385 return VINF_SUCCESS;
2386 }
2387 RT_NOREF(paArgs, cArgs);
2388 return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
2389}
2390
2391
2392/**
2393 * Called by IEMR3Init to register debugger commands.
2394 */
2395static void iemR3RegisterDebuggerCommands(void)
2396{
2397 /*
2398 * Register debugger commands.
2399 */
2400 static DBGCCMD const s_aCmds[] =
2401 {
2402 {
2403 /* .pszCmd = */ "iemflushtlb",
2404 /* .cArgsMin = */ 0,
2405 /* .cArgsMax = */ 0,
2406 /* .paArgDescs = */ NULL,
2407 /* .cArgDescs = */ 0,
2408 /* .fFlags = */ 0,
2409 /* .pfnHandler = */ iemR3DbgFlushTlbs,
2410 /* .pszSyntax = */ "",
2411 /* .pszDescription = */ "Flushed the code and data TLBs"
2412 },
2413 };
2414
2415 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
2416 AssertLogRelRC(rc);
2417}
2418
2419#endif /* VBOX_WITH_DEBUGGER */
2420
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette