VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp@ 106518

Last change on this file since 106518 was 106453, checked in by vboxsync, 3 months ago

VMM/IEM: Eliminated the IEMNATIVE_WITH_SIMD_REG_ALLOCATOR define. Fixed bug in iemNativeEmitMemFetchStoreDataCommon where a SIMD register was masked in calls to iemNativeVarSaveVolatileRegsPreHlpCall and friends. Fixed theoretical loop-forever bugs in iemNativeSimdRegAllocFindFree & iemNativeRegAllocFindFree. bugref:10720

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 146.4 KB
Line 
1/* $Id: IEMR3.cpp 106453 2024-10-17 13:54:35Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/dbgf.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/ssm.h>
39#if defined(VBOX_VMM_TARGET_ARMV8)
40# include "IEMInternal-armv8.h"
41#else
42# include "IEMInternal.h"
43#endif
44#include <VBox/vmm/vm.h>
45#include <VBox/vmm/vmapi.h>
46#include <VBox/err.h>
47#ifdef VBOX_WITH_DEBUGGER
48# include <VBox/dbg.h>
49#endif
50
51#include <iprt/assert.h>
52#include <iprt/getopt.h>
53#ifdef IEM_WITH_TLB_TRACE
54# include <iprt/mem.h>
55#endif
56#include <iprt/string.h>
57
58#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
59# include "IEMN8veRecompiler.h"
60# include "IEMThreadedFunctions.h"
61# include "IEMInline.h"
62#endif
63
64
65/*********************************************************************************************************************************
66* Internal Functions *
67*********************************************************************************************************************************/
/** @name Forward declarations of DBGF argv info handlers defined later in this file.
 * @{ */
/** Instruction TLB info handler. */
static FNDBGFINFOARGVINT iemR3InfoITlb;
/** Data TLB info handler. */
static FNDBGFINFOARGVINT iemR3InfoDTlb;
#ifdef IEM_WITH_TLB_TRACE
/** TLB trace buffer info handler (only with IEM_WITH_TLB_TRACE). */
static FNDBGFINFOARGVINT iemR3InfoTlbTrace;
#endif
#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
/** Translation block info handler (recompiler builds only). */
static FNDBGFINFOARGVINT iemR3InfoTb;
/** Top-N translation block info handler (recompiler builds only). */
static FNDBGFINFOARGVINT iemR3InfoTbTop;
#endif
#ifdef VBOX_WITH_DEBUGGER
/** Registers the '.iem*' debugger commands (only with VBOX_WITH_DEBUGGER). */
static void iemR3RegisterDebuggerCommands(void);
#endif
/** @} */
80
81
82#if !defined(VBOX_VMM_TARGET_ARMV8)
83static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
84{
85 switch (enmTargetCpu)
86 {
87#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
88 CASE_RET_STR(IEMTARGETCPU_8086);
89 CASE_RET_STR(IEMTARGETCPU_V20);
90 CASE_RET_STR(IEMTARGETCPU_186);
91 CASE_RET_STR(IEMTARGETCPU_286);
92 CASE_RET_STR(IEMTARGETCPU_386);
93 CASE_RET_STR(IEMTARGETCPU_486);
94 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
95 CASE_RET_STR(IEMTARGETCPU_PPRO);
96 CASE_RET_STR(IEMTARGETCPU_CURRENT);
97#undef CASE_RET_STR
98 default: return "Unknown";
99 }
100}
101#endif
102
103
104/**
105 * Initializes the interpreted execution manager.
106 *
 * This must be called after CPUM as we're querying information from CPUM about
108 * the guest and host CPUs.
109 *
110 * @returns VBox status code.
111 * @param pVM The cross context VM structure.
112 */
113VMMR3DECL(int) IEMR3Init(PVM pVM)
114{
115 /*
116 * Read configuration.
117 */
118#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
119 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
120 int rc;
121#endif
122
123#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
124 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
125 * Controls whether the custom VBox specific CPUID host call interface is
126 * enabled or not. */
127# ifdef DEBUG_bird
128 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
129# else
130 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
131# endif
132 AssertLogRelRCReturn(rc, rc);
133#endif
134
135#ifdef VBOX_WITH_IEM_RECOMPILER
136 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
137 * Max number of TBs per EMT. */
138 uint32_t cMaxTbs = 0;
139 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
140 AssertLogRelRCReturn(rc, rc);
141 if (cMaxTbs < _16K || cMaxTbs > _8M)
142 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
143 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
144
145 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32678}
146 * Initial (minimum) number of TBs per EMT in ring-3. */
147 uint32_t cInitialTbs = 0;
148 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
149 AssertLogRelRCReturn(rc, rc);
150 if (cInitialTbs < _16K || cInitialTbs > _8M)
151 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
152 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
153
    /* Check that the two values make sense together. Expect user/api to do
       the right thing or get lost. */
156 if (cInitialTbs > cMaxTbs)
157 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
158 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
159 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
160
161 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
162 * Max executable memory for recompiled code per EMT. */
163 uint64_t cbMaxExec = 0;
164 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
165 AssertLogRelRCReturn(rc, rc);
166 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
167 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
168 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
169 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
170
171 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
172 * The executable memory allocator chunk size. */
173 uint32_t cbChunkExec = 0;
174 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
175 AssertLogRelRCReturn(rc, rc);
176 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
177 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
178 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
179 cbChunkExec, cbChunkExec, _1M, _256M);
180
181 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
182 * The initial executable memory allocator size (per EMT). The value is
183 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
184 uint64_t cbInitialExec = 0;
185 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
186 AssertLogRelRCReturn(rc, rc);
187 if (cbInitialExec > cbMaxExec)
188 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
189 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
190 cbInitialExec, cbInitialExec, cbMaxExec);
191
192 /** @cfgm{/IEM/NativeRecompileAtUsedCount, uint32_t, 16}
193 * The translation block use count value to do native recompilation at.
194 * Set to zero to disable native recompilation. */
195 uint32_t uTbNativeRecompileAtUsedCount = 16;
196 rc = CFGMR3QueryU32Def(pIem, "NativeRecompileAtUsedCount", &uTbNativeRecompileAtUsedCount, 16);
197 AssertLogRelRCReturn(rc, rc);
198
199 /** @cfgm{/IEM/HostICacheInvalidationViaHostAPI, bool, false}
200 * Whether to use any available host OS API for flushing the instruction cache
 * after completing a translation block. */
202 bool fFlag = false;
203 rc = CFGMR3QueryBoolDef(pIem, "HostICacheInvalidationViaHostAPI", &fFlag, false);
204 AssertLogRelRCReturn(rc, rc);
205 uint8_t fHostICacheInvalidation = fFlag ? IEMNATIVE_ICACHE_F_USE_HOST_API : 0;
206
207 /** @cfgm{/IEM/HostICacheInvalidationEndWithIsb, bool, false}
208 * Whether to include an ISB in the instruction cache invalidation sequence
 * after completing a translation block. */
210 fFlag = false;
211 rc = CFGMR3QueryBoolDef(pIem, "HostICacheInvalidationEndWithIsb", &fFlag, false);
212 AssertLogRelRCReturn(rc, rc);
213 if (fFlag)
214 fHostICacheInvalidation |= IEMNATIVE_ICACHE_F_END_WITH_ISH;
215
216#endif /* VBOX_WITH_IEM_RECOMPILER*/
217
218 /*
219 * Initialize per-CPU data and register statistics.
220 */
221#if 1
222 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
223 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
224#else
225 uint64_t const uInitialTlbRevision = UINT64_C(0) + (IEMTLB_REVISION_INCR * 4U);
226 uint64_t const uInitialTlbPhysRev = UINT64_C(0) + (IEMTLB_PHYS_REV_INCR * 4U);
227#endif
228
229 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
230 {
231 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
232 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do it's job w/o instruction stats) */
233
234 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
235#ifndef VBOX_VMM_TARGET_ARMV8
236 pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal = pVCpu->iem.s.DataTlb.uTlbRevisionGlobal = uInitialTlbRevision;
237#endif
238 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
239#ifndef VBOX_VMM_TARGET_ARMV8
240 pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
241 pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
242 pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
243 pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
244#endif
245
246#ifndef VBOX_VMM_TARGET_ARMV8
247 pVCpu->iem.s.cTbsTillNextTimerPoll = 128;
248 pVCpu->iem.s.cTbsTillNextTimerPollPrev = 128;
249#endif
250
251 /*
252 * Host and guest CPU information.
253 */
254 if (idCpu == 0)
255 {
256 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
257 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
258#if !defined(VBOX_VMM_TARGET_ARMV8)
259 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
260 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
261 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
262# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
263 if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
264 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
265 else
266# endif
267 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
268#else
269 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
270 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
271#endif
272
273#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
274 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
275 {
276 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
277 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
278 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
279 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
280 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
281 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
282 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
283 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
284 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
285 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
286 }
287 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
288 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
289 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
290#else
291 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
292 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
293 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
294#endif
295 }
296 else
297 {
298 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
299 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor;
300 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
301 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
302#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
303 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
304#endif
305 }
306
307 /*
308 * Mark all buffers free.
309 */
310 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
311 while (iMemMap-- > 0)
312 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
313
314#ifdef VBOX_WITH_IEM_RECOMPILER
315 /*
316 * Recompiler state and configuration distribution.
317 */
318 pVCpu->iem.s.uRegFpCtrl = IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED;
319 pVCpu->iem.s.uTbNativeRecompileAtUsedCount = uTbNativeRecompileAtUsedCount;
320 pVCpu->iem.s.fHostICacheInvalidation = fHostICacheInvalidation;
321#endif
322
323#ifdef IEM_WITH_TLB_TRACE
324 /*
325 * Allocate trace buffer.
326 */
327 pVCpu->iem.s.idxTlbTraceEntry = 0;
328 pVCpu->iem.s.cTlbTraceEntriesShift = 16;
329 pVCpu->iem.s.paTlbTraceEntries = (PIEMTLBTRACEENTRY)RTMemPageAlloc( RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift)
330 * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
331 AssertLogRelReturn(pVCpu->iem.s.paTlbTraceEntries, VERR_NO_PAGE_MEMORY);
332#endif
333 }
334
335
336#ifdef VBOX_WITH_IEM_RECOMPILER
337 /*
338 * Initialize the TB allocator and cache (/ hash table).
339 *
340 * This is done by each EMT to try get more optimal thread/numa locality of
341 * the allocations.
342 */
343 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
344 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
345 AssertLogRelRCReturn(rc, rc);
346#endif
347
348 /*
349 * Register statistics.
350 */
351 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
352 {
353#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
354 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
355 char szPat[128];
356 RT_NOREF_PV(szPat); /* lazy bird */
357 char szVal[128];
358 RT_NOREF_PV(szVal); /* lazy bird */
359
360 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
361 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
362 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
363 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
364 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
365 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
366 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
367 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
368 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
369 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
370 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
371 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
372 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
373 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
374 STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
375 "Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
376 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
377 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
378 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
379 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
380
381 /* Code TLB: */
382 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
383 "Code TLB non-global revision", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobal", idCpu);
384 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
385 "Code TLB global revision", "/IEM/CPU%u/Tlb/Code/RevisionGlobal", idCpu);
386 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
387 "Code TLB non-global flushes", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobalFlushes", idCpu);
388 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
389 "Code TLB global flushes", "/IEM/CPU%u/Tlb/Code/RevisionGlobalFlushes", idCpu);
390 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
391 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/RevisionRollovers", idCpu);
392
393 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
394 "Code TLB physical revision", "/IEM/CPU%u/Tlb/Code/PhysicalRevision", idCpu);
395 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
396 "Code TLB revision flushes", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionFlushes", idCpu);
397 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
398 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionRollovers", idCpu);
399
400 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
401 "Code TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageGlobalCurLoads", idCpu);
402 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
403 "Code TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalFirstTag", idCpu);
404 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
405 "Code TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalLastTag", idCpu);
406
407 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
408 "Code TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalCurLoads", idCpu);
409 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
410 "Code TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalFirstTag", idCpu);
411 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
412 "Code TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalLastTag", idCpu);
413
414 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
415 "Code TLB page invalidation requests", "/IEM/CPU%u/Tlb/Code/InvlPg", idCpu);
416 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
417 "Code TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeGlobal", idCpu);
418 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
419 "Code TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeNonGlobal", idCpu);
420
421 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
422 "Code TLB misses", "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
423 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
424 "Code TLB global loads", "/IEM/CPU%u/Tlb/Code/Misses/GlobalLoads", idCpu);
425 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
426 "Code TLB slow read path", "/IEM/CPU%u/Tlb/Code/SlowReads", idCpu);
427# ifdef IEM_WITH_TLB_STATISTICS
428 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
429 "Code TLB hits (non-native)", "/IEM/CPU%u/Tlb/Code/Hits/Other", idCpu);
430# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
431 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
432 "Code TLB native hits on new page", "/IEM/CPU%u/Tlb/Code/Hits/New-Page", idCpu);
433 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
434 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/Tlb/Code/Hits/New-Page-With-Offset", idCpu);
435# endif
436
437 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits/*", idCpu);
438 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB hits",
439 "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
440
441 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Hits|/IEM/CPU%u/Tlb/Code/Misses", idCpu, idCpu);
442 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB lookups (sum of hits and misses)",
443 "/IEM/CPU%u/Tlb/Code/AllLookups", idCpu);
444
445 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
446 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
447 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
448 "Code TLB actual miss rate", "/IEM/CPU%u/Tlb/Code/RateMisses", idCpu);
449
450# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
451 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
452 "Code TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
453 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Tag", idCpu);
454 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
455 "Code TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
456 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
457 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
458 "Code TLB misses in native code: Alignment [not directly included grand parent sum]",
459 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Alignment", idCpu);
460 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
461 "Code TLB misses in native code: Cross page [not directly included grand parent sum]",
462 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/CrossPage", idCpu);
463 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
464 "Code TLB misses in native code: Non-canonical [not directly included grand parent sum]",
465 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/NonCanonical", idCpu);
466
467 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
468 "Code TLB native misses on new page",
469 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page", idCpu);
470 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
471 "Code TLB native misses on new page w/ offset",
472 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page-With-Offset", idCpu);
473# endif
474# endif /* IEM_WITH_TLB_STATISTICS */
475
476 /* Data TLB organized as best we can... */
477 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
478 "Data TLB non-global revision", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobal", idCpu);
479 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
480 "Data TLB global revision", "/IEM/CPU%u/Tlb/Data/RevisionGlobal", idCpu);
481 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
482 "Data TLB non-global flushes", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobalFlushes", idCpu);
483 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
484 "Data TLB global flushes", "/IEM/CPU%u/Tlb/Data/RevisionGlobalFlushes", idCpu);
485 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
486 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/RevisionRollovers", idCpu);
487
488 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
489 "Data TLB physical revision", "/IEM/CPU%u/Tlb/Data/PhysicalRevision", idCpu);
490 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
491 "Data TLB revision flushes", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionFlushes", idCpu);
492 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
493 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionRollovers", idCpu);
494
495 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
496 "Data TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageGlobalCurLoads", idCpu);
497 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
498 "Data TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalFirstTag", idCpu);
499 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
500 "Data TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalLastTag", idCpu);
501
502 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
503 "Data TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalCurLoads", idCpu);
504 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
505 "Data TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalFirstTag", idCpu);
506 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
507 "Data TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalLastTag", idCpu);
508
509 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
510 "Data TLB page invalidation requests", "/IEM/CPU%u/Tlb/Data/InvlPg", idCpu);
511 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
512 "Data TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeGlobal", idCpu);
513 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
514 "Data TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeNonGlobal", idCpu);
515
516 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
517 "Data TLB core misses (iemMemMap, direct iemMemMapJmp (not safe path))",
518 "/IEM/CPU%u/Tlb/Data/Misses/Core", idCpu);
519 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
520 "Data TLB global loads",
521 "/IEM/CPU%u/Tlb/Data/Misses/Core/GlobalLoads", idCpu);
522 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
523 "Data TLB safe read path (inline/native misses going to iemMemMapJmp)",
524 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Reads", idCpu);
525 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
526 "Data TLB safe write path (inline/native misses going to iemMemMapJmp)",
527 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Writes", idCpu);
528 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/*", idCpu);
529 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB misses",
530 "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
531
532 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/Safe/*", idCpu);
533 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB actual safe path calls (read + write)",
534 "/IEM/CPU%u/Tlb/Data/Misses/Safe", idCpu);
535 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
536 "Data TLB hits in iemMemMapJmp - not part of safe-path total",
537 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartHits", idCpu);
538 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
539 "Data TLB misses in iemMemMapJmp - not part of safe-path total",
540 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses", idCpu);
541 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
542 "Data TLB global loads",
543 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses/GlobalLoads", idCpu);
544
545# ifdef IEM_WITH_TLB_STATISTICS
546# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
547 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
548 "Data TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
549 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Tag", idCpu);
550 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
551 "Data TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
552 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
553 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
554 "Data TLB misses in native code: Alignment [not directly included grand parent sum]",
555 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Alignment", idCpu);
556 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
557 "Data TLB misses in native code: Cross page [not directly included grand parent sum]",
558 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/CrossPage", idCpu);
559 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
560 "Data TLB misses in native code: Non-canonical [not directly included grand parent sum]",
561 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/NonCanonical", idCpu);
562# endif
563# endif
564
565# ifdef IEM_WITH_TLB_STATISTICS
566 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
567 "Data TLB core hits (iemMemMap, direct iemMemMapJmp (not safe path))",
568 "/IEM/CPU%u/Tlb/Data/Hits/Core", idCpu);
569 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInlineCodeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
570 "Data TLB hits in IEMAllMemRWTmplInline.cpp.h",
571 "/IEM/CPU%u/Tlb/Data/Hits/Inline", idCpu);
572# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
573 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
574 "Data TLB native stack access hits",
575 "/IEM/CPU%u/Tlb/Data/Hits/Native/Stack", idCpu);
576 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
577 "Data TLB native data fetch hits",
578 "/IEM/CPU%u/Tlb/Data/Hits/Native/Fetch", idCpu);
579 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
580 "Data TLB native data store hits",
581 "/IEM/CPU%u/Tlb/Data/Hits/Native/Store", idCpu);
582 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
583 "Data TLB native mapped data hits",
584 "/IEM/CPU%u/Tlb/Data/Hits/Native/Mapped", idCpu);
585# endif
586 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/*", idCpu);
587 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits",
588 "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
589
590# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
591 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/Native/*", idCpu);
592 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits from native code",
593 "/IEM/CPU%u/Tlb/Data/Hits/Native", idCpu);
594# endif
595
596 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Hits|/IEM/CPU%u/Tlb/Data/Misses", idCpu, idCpu);
597 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB lookups (sum of hits and misses)",
598 "/IEM/CPU%u/Tlb/Data/AllLookups", idCpu);
599
600 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
601 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
602 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
603 "Data TLB actual miss rate", "/IEM/CPU%u/Tlb/Data/RateMisses", idCpu);
604
605# endif /* IEM_WITH_TLB_STATISTICS */
606
607
608#ifdef VBOX_WITH_IEM_RECOMPILER
609 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
610 "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
611 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
612 "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
613 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
614 "Times threaded TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecThreadedBreaks", idCpu);
615# ifdef VBOX_WITH_STATISTICS
616 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
617 "Times threaded TB execution was interrupted/broken off on a call with lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithLookup", idCpu);
618 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithoutLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
619 "Times threaded TB execution was interrupted/broken off on a call without lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithoutLookup", idCpu);
620# endif
621
622# ifdef VBOX_WITH_STATISTICS
623 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPoll, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
624 "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll", idCpu);
625 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollRun, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
626 "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll/Running", idCpu);
627 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollUnchanged, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
628 "Timer polling interval unchanged", "/IEM/CPU%u/re/TimerPoll/Unchanged", idCpu);
629 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollTiny, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
630 "Timer polling interval tiny", "/IEM/CPU%u/re/TimerPoll/Tiny", idCpu);
631 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollDefaultCalc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
632 "Timer polling interval calculated using defaults", "/IEM/CPU%u/re/TimerPoll/DefaultCalc", idCpu);
633 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollMax, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
634 "Timer polling interval maxed out", "/IEM/CPU%u/re/TimerPoll/Max", idCpu);
635 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorDivision, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE,
636 "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorDivision", idCpu);
637 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorMultiplication, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
638 "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorMultiplication", idCpu);
639# endif
640 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbsTillNextTimerPollPrev, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
641 "Timer polling interval (in TBs)", "/IEM/CPU%u/re/TimerPollInterval", idCpu);
642
643 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
644 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
645 "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
646 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
647 "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
648# ifdef VBOX_WITH_STATISTICS
649 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
650 "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
651# endif
652 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
653 "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/ExecMem/TbPruningNative", idCpu);
654 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
655 "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
656 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
657 "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
658 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
659 "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
660 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
661 "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
662 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
663 "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
664 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
665 "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
666 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
667 "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);
668
669 PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
670 STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
671 "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);
672
673 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
674 "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
675 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHitsViaTbLookupTable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
676 "Translation block lookup hits via TB lookup table associated with the previous TB", "/IEM/CPU%u/re/cTbLookupHitsViaTbLookupTable", idCpu);
677 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
678 "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
679 STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
680 "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
681# ifdef VBOX_WITH_STATISTICS
682 STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
683 "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
684# endif
685
686 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
687 "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
688 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
689 "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
690 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLookupEntries, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
691 "TB lookup table entries per threaded translation block", "/IEM/CPU%u/re/ThrdLookupEntriesPerTb", idCpu);
692
693 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
694 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
695 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckTimersBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
696 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckTimersBreaks", idCpu);
697 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
698 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
699 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
700 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
701 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
702 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
703
704 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
705 "Detected loop full TB", "/IEM/CPU%u/re/LoopFullTbDetected", idCpu);
706 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected2, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
707 "Detected loop full TB but looping back to before the first TB instruction",
708 "/IEM/CPU%u/re/LoopFullTbDetected2", idCpu);
709 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopInTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
710 "Detected loop within TB", "/IEM/CPU%u/re/LoopInTbDetected", idCpu);
711
712 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeExecMemInstrBufAllocFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
713 "Number of times the exec memory allocator failed to allocate a large enough buffer",
714 "/IEM/CPU%u/re/NativeExecMemInstrBufAllocFailed", idCpu);
715
716 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
717 "Number of threaded calls per TB that have been properly recompiled to native code",
718 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
719 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
720 "Number of threaded calls per TB that could not be recompiler to native code",
721 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
722 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
723 "Number of threaded calls that could not be recompiler to native code",
724 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
725
726 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
727 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
728 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
729 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
730
731# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
732# ifdef VBOX_WITH_STATISTICS
733 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
734 "Number of calls to iemNativeRegAllocFindFree.",
735 "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
736# endif
737 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
738 "Number of times iemNativeRegAllocFindFree needed to free a variable.",
739 "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
740# ifdef VBOX_WITH_STATISTICS
741 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
742 "Number of times iemNativeRegAllocFindFree did not needed to free any variables.",
743 "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
744 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
745 "Times liveness info freeed up shadowed guest registers in iemNativeRegAllocFindFree.",
746 "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
747 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
748 "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
749 "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
750
751# define REG_NATIVE_EFL_GROUP(a_Lower, a_Camel) do { \
752 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponed ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
753 "Postponed all status flag updating, " #a_Lower " instructions", \
754 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
755 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkipped ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
756 "Skipped all status flag updating, " #a_Lower " instructions", \
757 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
758 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflTotal ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
759 "Total number of " #a_Lower " intructions with status flag updating", \
760 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
761 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
762 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
763 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
764 "Postponed all status flag updating, " #a_Lower " instructions, percentage", \
765 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "PostponedPct", idCpu); \
766 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
767 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
768 "Skipped all status flag updating, " #a_Lower " instructions, percentage", \
769 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "SkippedPct", idCpu); \
770 } while (0)
771 REG_NATIVE_EFL_GROUP(arithmetic, Arithmetic);
772 REG_NATIVE_EFL_GROUP(logical, Logical);
773 REG_NATIVE_EFL_GROUP(shift, Shift);
774# undef REG_NATIVE_EFL_GROUP
775
776 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponedEmits, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
777 "Postponed EFLAGS calculation emits", "/IEM/CPU%u/re/NativeEFlags/ZZEmits", idCpu);
778
779 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
780 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippable", idCpu);
781 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippable", idCpu);
782 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippable", idCpu);
783 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippable", idCpu);
784 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippable", idCpu);
785
786 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfRequired", idCpu);
787 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfRequired", idCpu);
788 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfRequired", idCpu);
789 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfRequired", idCpu);
790 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfRequired", idCpu);
791 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfRequired", idCpu);
792
793# ifdef IEMLIVENESS_EXTENDED_LAYOUT
794 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfDelayable", idCpu);
795 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfDelayable", idCpu);
796 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfDelayable", idCpu);
797 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfDelayable", idCpu);
798 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfDelayable", idCpu);
799 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfDelayable", idCpu);
800# endif
801
802 /* Sum up all status bits ('_' is a sorting hack). */
803 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fSkippable*", idCpu);
804 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total skippable EFLAGS status bit updating",
805 "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);
806
807 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fRequired*", idCpu);
808 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total required STATUS status bit updating",
809 "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);
810
811# ifdef IEMLIVENESS_EXTENDED_LAYOUT
812 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fDelayable*", idCpu);
813 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total potentially delayable STATUS status bit updating",
814 "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
815# endif
816
817 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?f*", idCpu);
818 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total STATUS status bit events of any kind",
819 "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
820
821 /* Corresponding ratios / percentages of the totals. */
822 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
823 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);
824 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
825 "Total skippable EFLAGS status bit updating percentage",
826 "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippablePct", idCpu);
827
828 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
829 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);
830 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
831 "Total required EFLAGS status bit updating percentage",
832 "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequiredPct", idCpu);
833
834# ifdef IEMLIVENESS_EXTENDED_LAYOUT
835 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
836 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
837 "Total potentially delayable EFLAGS status bit updating percentage",
838 "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayablePct", idCpu);
839# endif
840
841 /* Ratios of individual bits. */
842 size_t const offFlagChar = RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/Cf*", idCpu) - 3;
843 Assert(szPat[offFlagChar] == 'C');
844 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
845 Assert(szVal[offFlagChar] == 'C');
846 szPat[offFlagChar] = szVal[offFlagChar] = 'C'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.CF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippablePct", idCpu);
847 szPat[offFlagChar] = szVal[offFlagChar] = 'P'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.PF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippablePct", idCpu);
848 szPat[offFlagChar] = szVal[offFlagChar] = 'A'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.AF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippablePct", idCpu);
849 szPat[offFlagChar] = szVal[offFlagChar] = 'Z'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.ZF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippablePct", idCpu);
850 szPat[offFlagChar] = szVal[offFlagChar] = 'S'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.SF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippablePct", idCpu);
851 szPat[offFlagChar] = szVal[offFlagChar] = 'O'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.OF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippablePct", idCpu);
852
853 /* PC updates total and skipped, with PCT ratio. */
854 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates", "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
855 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
856 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
857 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
858 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
859 "Delayed RIP updating percentage",
860 "/IEM/CPU%u/re/NativePcUpdateDelayed_StatusDelayedPct", idCpu);
861
862# endif /* VBOX_WITH_STATISTICS */
863# ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
864 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEndIfOtherBranchDirty, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
865 "IEM_MC_ENDIF flushing dirty shadow registers for other branch (not good).",
866 "/IEM/CPU%u/re/NativeEndIfOtherBranchDirty", idCpu);
867# endif
868# ifdef VBOX_WITH_STATISTICS
869 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
870 "Number of calls to iemNativeSimdRegAllocFindFree.",
871 "/IEM/CPU%u/re/NativeSimdRegFindFree", idCpu);
872 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
873 "Number of times iemNativeSimdRegAllocFindFree needed to free a variable.",
874 "/IEM/CPU%u/re/NativeSimdRegFindFreeVar", idCpu);
875 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
876 "Number of times iemNativeSimdRegAllocFindFree did not needed to free any variables.",
877 "/IEM/CPU%u/re/NativeSimdRegFindFreeNoVar", idCpu);
878 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
879 "Times liveness info freeed up shadowed guest registers in iemNativeSimdRegAllocFindFree.",
880 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessUnshadowed", idCpu);
881 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
882 "Times liveness info helped finding the return register in iemNativeSimdRegAllocFindFree.",
883 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessHelped", idCpu);
884
885 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks",
886 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckPotential", idCpu);
887 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks",
888 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckPotential", idCpu);
889 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks",
890 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckPotential", idCpu);
891 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks",
892 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckPotential", idCpu);
893
894 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted",
895 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckOmitted", idCpu);
896 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted",
897 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckOmitted", idCpu);
898 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted",
899 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckOmitted", idCpu);
900 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted",
901 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckOmitted", idCpu);
902
903 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbFinished, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
904 "Number of times the TB finishes execution completely",
905 "/IEM/CPU%u/re/NativeTbFinished", idCpu);
906# endif /* VBOX_WITH_STATISTICS */
907 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreak, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
908 "Number of times the TB finished through the ReturnBreak label",
909 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak", idCpu);
910 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreakFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
911 "Number of times the TB finished through the ReturnBreak label",
912 "/IEM/CPU%u/re/NativeTbExit/ReturnBreakFF", idCpu);
913 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnWithFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
914 "Number of times the TB finished through the ReturnWithFlags label",
915 "/IEM/CPU%u/re/NativeTbExit/ReturnWithFlags", idCpu);
916 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnOtherStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
917 "Number of times the TB finished with some other status value",
918 "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus", idCpu);
919 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLongJump, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
920 "Number of times the TB finished via long jump / throw",
921 "/IEM/CPU%u/re/NativeTbExit/LongJumps", idCpu);
922 /* These end up returning VINF_IEM_REEXEC_BREAK and are thus already counted under NativeTbExit/ReturnBreak: */
923 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitObsoleteTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
924 "Number of times the TB finished through the ObsoleteTb label",
925 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/ObsoleteTb", idCpu);
926 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
927 "Number of times the TB finished through the NeedCsLimChecking label",
928 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/NeedCsLimChecking", idCpu);
929 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
930 "Number of times the TB finished through the CheckBranchMiss label",
931 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/CheckBranchMiss", idCpu);
932 /* Raising stuff will either increment NativeTbExit/LongJumps or NativeTbExit/ReturnOtherStatus
933 depending on whether VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is defined: */
934# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
935# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus/"
936# else
937# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/LongJumps/"
938# endif
939 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseDe, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
940 "Number of times the TB finished raising a #DE exception",
941 RAISE_PREFIX "RaiseDe", idCpu);
942 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
943 "Number of times the TB finished raising a #UD exception",
944 RAISE_PREFIX "RaiseUd", idCpu);
945 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
946 "Number of times the TB finished raising a SSE related exception",
947 RAISE_PREFIX "RaiseSseRelated", idCpu);
948 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseAvxRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
949 "Number of times the TB finished raising a AVX related exception",
950 RAISE_PREFIX "RaiseAvxRelated", idCpu);
951 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseAvxFpRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
952 "Number of times the TB finished raising a SSE/AVX floating point related exception",
953 RAISE_PREFIX "RaiseSseAvxFpRelated", idCpu);
954 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseNm, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
955 "Number of times the TB finished raising a #NM exception",
956 RAISE_PREFIX "RaiseNm", idCpu);
957 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseGp0, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
958 "Number of times the TB finished raising a #GP(0) exception",
959 RAISE_PREFIX "RaiseGp0", idCpu);
960 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseMf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
961 "Number of times the TB finished raising a #MF exception",
962 RAISE_PREFIX "RaiseMf", idCpu);
963 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseXf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
964 "Number of times the TB finished raising a #XF exception",
965 RAISE_PREFIX "RaiseXf", idCpu);
966
967# ifdef VBOX_WITH_STATISTICS
968 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLoopFullTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
969 "Number of full TB loops.",
970 "/IEM/CPU%u/re/NativeTbExit/LoopFullTb", idCpu);
971# endif
972
973 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
974 "Direct linking #1 with IRQ check succeeded",
975 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1Irq", idCpu);
976 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
977 "Direct linking #1 w/o IRQ check succeeded",
978 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1NoIrq", idCpu);
979# ifdef VBOX_WITH_STATISTICS
980 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
981 "Direct linking #1 failed: No TB in lookup table",
982 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1NoTb", idCpu);
983 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
984 "Direct linking #1 failed: GCPhysPc mismatch",
985 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchGCPhysPc", idCpu);
986 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
987 "Direct linking #1 failed: TB flags mismatch",
988 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchFlags", idCpu);
989 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
990 "Direct linking #1 failed: IRQ or FF pending",
991 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1PendingIrq", idCpu);
992# endif
993
994 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
995 "Direct linking #2 with IRQ check succeeded",
996 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2Irq", idCpu);
997 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
998 "Direct linking #2 w/o IRQ check succeeded",
999 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2NoIrq", idCpu);
1000# ifdef VBOX_WITH_STATISTICS
1001 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1002 "Direct linking #2 failed: No TB in lookup table",
1003 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2NoTb", idCpu);
1004 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1005 "Direct linking #2 failed: GCPhysPc mismatch",
1006 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchGCPhysPc", idCpu);
1007 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1008 "Direct linking #2 failed: TB flags mismatch",
1009 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchFlags", idCpu);
1010 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1011 "Direct linking #2 failed: IRQ or FF pending",
1012 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2PendingIrq", idCpu);
1013# endif
1014
1015 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeTbExit/*", idCpu); /* only immediate children, no sub folders */
1016 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
1017 "Number of times native TB execution finished before the end (not counting thrown memory++ exceptions)",
1018 "/IEM/CPU%u/re/NativeTbExit", idCpu);
1019
1020
1021# endif /* VBOX_WITH_IEM_NATIVE_RECOMPILER */
1022
1023
1024# ifdef VBOX_WITH_STATISTICS
1025 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1026 "iemMemMapJmp calls", "/IEM/CPU%u/iemMemMapJmp", idCpu);
1027 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapNoJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1028 "iemMemMap calls", "/IEM/CPU%u/iemMemMapNoJmp", idCpu);
1029 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferCrossPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1030 "iemMemBounceBufferMapCrossPage calls", "/IEM/CPU%u/iemMemMapBounceBufferCrossPage", idCpu);
1031 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferMapPhys, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1032 "iemMemBounceBufferMapPhys calls", "/IEM/CPU%u/iemMemMapBounceBufferMapPhys", idCpu);
1033# endif
1034
1035
1036#endif /* VBOX_WITH_IEM_RECOMPILER */
1037
1038 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
1039 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1040 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
1041 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
1042 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1043 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
1044
1045# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
1046 /* Instruction statistics: */
1047# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
1048 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1049 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
1050 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1051 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
1052# include "IEMInstructionStatisticsTmpl.h"
1053# undef IEM_DO_INSTR_STAT
1054# endif
1055
1056# if defined(VBOX_WITH_STATISTICS) && defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1057 /* Threaded function statistics: */
1058 for (unsigned i = 1; i < (unsigned)kIemThreadedFunc_End; i++)
1059 STAMR3RegisterF(pVM, &pVCpu->iem.s.acThreadedFuncStats[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED,
1060 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/ThrdFuncs/%s", idCpu, g_apszIemThreadedFunctionStats[i]);
1061# endif
1062
1063
1064 for (unsigned i = 1; i < RT_ELEMENTS(pVCpu->iem.s.aStatAdHoc); i++)
1065 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatAdHoc[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
1066 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/AdHoc/%02u", idCpu, i);
1067
1068#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
1069 }
1070
1071#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1072 /*
1073 * Register the per-VM VMX APIC-access page handler type.
1074 */
1075 if (pVM->cpum.ro.GuestFeatures.fVmx)
1076 {
1077 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
1078 iemVmxApicAccessPageHandler,
1079 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
1080 AssertLogRelRCReturn(rc, rc);
1081 }
1082#endif
1083
1084 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1085 DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM instruction TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1086#ifdef IEM_WITH_TLB_TRACE
1087 DBGFR3InfoRegisterInternalArgv(pVM, "tlbtrace", "IEM TLB trace log", iemR3InfoTlbTrace, DBGFINFO_FLAGS_RUN_ON_EMT);
1088#endif
1089#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1090 DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
1091 DBGFR3InfoRegisterInternalArgv(pVM, "tbtop", "IEM translation blocks most used or most recently used",
1092 iemR3InfoTbTop, DBGFINFO_FLAGS_RUN_ON_EMT);
1093#endif
1094#ifdef VBOX_WITH_DEBUGGER
1095 iemR3RegisterDebuggerCommands();
1096#endif
1097
1098 return VINF_SUCCESS;
1099}
1100
1101
1102VMMR3DECL(int) IEMR3Term(PVM pVM)
1103{
1104 NOREF(pVM);
1105#ifdef IEM_WITH_TLB_TRACE
1106 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1107 {
1108 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
1109 RTMemPageFree(pVCpu->iem.s.paTlbTraceEntries,
1110 RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift) * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
1111 }
1112#endif
1113#if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER) && defined(VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING)
1114 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1115 iemThreadedSaveTbForProfilingCleanup(pVM->apCpusR3[idCpu]);
1116#endif
1117 return VINF_SUCCESS;
1118}
1119
1120
1121VMMR3DECL(void) IEMR3Relocate(PVM pVM)
1122{
1123 RT_NOREF(pVM);
1124}
1125
1126
1127/**
1128 * Gets the name of a generic IEM exit code.
1129 *
1130 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
1131 * @param uExit The IEM exit to name.
1132 */
1133VMMR3DECL(const char *) IEMR3GetExitName(uint32_t uExit)
1134{
1135 static const char * const s_apszNames[] =
1136 {
1137 /* external interrupts */
1138 "ExtInt 00h", "ExtInt 01h", "ExtInt 02h", "ExtInt 03h", "ExtInt 04h", "ExtInt 05h", "ExtInt 06h", "ExtInt 07h",
1139 "ExtInt 08h", "ExtInt 09h", "ExtInt 0ah", "ExtInt 0bh", "ExtInt 0ch", "ExtInt 0dh", "ExtInt 0eh", "ExtInt 0fh",
1140 "ExtInt 10h", "ExtInt 11h", "ExtInt 12h", "ExtInt 13h", "ExtInt 14h", "ExtInt 15h", "ExtInt 16h", "ExtInt 17h",
1141 "ExtInt 18h", "ExtInt 19h", "ExtInt 1ah", "ExtInt 1bh", "ExtInt 1ch", "ExtInt 1dh", "ExtInt 1eh", "ExtInt 1fh",
1142 "ExtInt 20h", "ExtInt 21h", "ExtInt 22h", "ExtInt 23h", "ExtInt 24h", "ExtInt 25h", "ExtInt 26h", "ExtInt 27h",
1143 "ExtInt 28h", "ExtInt 29h", "ExtInt 2ah", "ExtInt 2bh", "ExtInt 2ch", "ExtInt 2dh", "ExtInt 2eh", "ExtInt 2fh",
1144 "ExtInt 30h", "ExtInt 31h", "ExtInt 32h", "ExtInt 33h", "ExtInt 34h", "ExtInt 35h", "ExtInt 36h", "ExtInt 37h",
1145 "ExtInt 38h", "ExtInt 39h", "ExtInt 3ah", "ExtInt 3bh", "ExtInt 3ch", "ExtInt 3dh", "ExtInt 3eh", "ExtInt 3fh",
1146 "ExtInt 40h", "ExtInt 41h", "ExtInt 42h", "ExtInt 43h", "ExtInt 44h", "ExtInt 45h", "ExtInt 46h", "ExtInt 47h",
1147 "ExtInt 48h", "ExtInt 49h", "ExtInt 4ah", "ExtInt 4bh", "ExtInt 4ch", "ExtInt 4dh", "ExtInt 4eh", "ExtInt 4fh",
1148 "ExtInt 50h", "ExtInt 51h", "ExtInt 52h", "ExtInt 53h", "ExtInt 54h", "ExtInt 55h", "ExtInt 56h", "ExtInt 57h",
1149 "ExtInt 58h", "ExtInt 59h", "ExtInt 5ah", "ExtInt 5bh", "ExtInt 5ch", "ExtInt 5dh", "ExtInt 5eh", "ExtInt 5fh",
1150 "ExtInt 60h", "ExtInt 61h", "ExtInt 62h", "ExtInt 63h", "ExtInt 64h", "ExtInt 65h", "ExtInt 66h", "ExtInt 67h",
1151 "ExtInt 68h", "ExtInt 69h", "ExtInt 6ah", "ExtInt 6bh", "ExtInt 6ch", "ExtInt 6dh", "ExtInt 6eh", "ExtInt 6fh",
1152 "ExtInt 70h", "ExtInt 71h", "ExtInt 72h", "ExtInt 73h", "ExtInt 74h", "ExtInt 75h", "ExtInt 76h", "ExtInt 77h",
1153 "ExtInt 78h", "ExtInt 79h", "ExtInt 7ah", "ExtInt 7bh", "ExtInt 7ch", "ExtInt 7dh", "ExtInt 7eh", "ExtInt 7fh",
1154 "ExtInt 80h", "ExtInt 81h", "ExtInt 82h", "ExtInt 83h", "ExtInt 84h", "ExtInt 85h", "ExtInt 86h", "ExtInt 87h",
1155 "ExtInt 88h", "ExtInt 89h", "ExtInt 8ah", "ExtInt 8bh", "ExtInt 8ch", "ExtInt 8dh", "ExtInt 8eh", "ExtInt 8fh",
1156 "ExtInt 90h", "ExtInt 91h", "ExtInt 92h", "ExtInt 93h", "ExtInt 94h", "ExtInt 95h", "ExtInt 96h", "ExtInt 97h",
1157 "ExtInt 98h", "ExtInt 99h", "ExtInt 9ah", "ExtInt 9bh", "ExtInt 9ch", "ExtInt 9dh", "ExtInt 9eh", "ExtInt 9fh",
1158 "ExtInt a0h", "ExtInt a1h", "ExtInt a2h", "ExtInt a3h", "ExtInt a4h", "ExtInt a5h", "ExtInt a6h", "ExtInt a7h",
1159 "ExtInt a8h", "ExtInt a9h", "ExtInt aah", "ExtInt abh", "ExtInt ach", "ExtInt adh", "ExtInt aeh", "ExtInt afh",
1160 "ExtInt b0h", "ExtInt b1h", "ExtInt b2h", "ExtInt b3h", "ExtInt b4h", "ExtInt b5h", "ExtInt b6h", "ExtInt b7h",
1161 "ExtInt b8h", "ExtInt b9h", "ExtInt bah", "ExtInt bbh", "ExtInt bch", "ExtInt bdh", "ExtInt beh", "ExtInt bfh",
1162 "ExtInt c0h", "ExtInt c1h", "ExtInt c2h", "ExtInt c3h", "ExtInt c4h", "ExtInt c5h", "ExtInt c6h", "ExtInt c7h",
1163 "ExtInt c8h", "ExtInt c9h", "ExtInt cah", "ExtInt cbh", "ExtInt cch", "ExtInt cdh", "ExtInt ceh", "ExtInt cfh",
1164 "ExtInt d0h", "ExtInt d1h", "ExtInt d2h", "ExtInt d3h", "ExtInt d4h", "ExtInt d5h", "ExtInt d6h", "ExtInt d7h",
1165 "ExtInt d8h", "ExtInt d9h", "ExtInt dah", "ExtInt dbh", "ExtInt dch", "ExtInt ddh", "ExtInt deh", "ExtInt dfh",
1166 "ExtInt e0h", "ExtInt e1h", "ExtInt e2h", "ExtInt e3h", "ExtInt e4h", "ExtInt e5h", "ExtInt e6h", "ExtInt e7h",
1167 "ExtInt e8h", "ExtInt e9h", "ExtInt eah", "ExtInt ebh", "ExtInt ech", "ExtInt edh", "ExtInt eeh", "ExtInt efh",
1168 "ExtInt f0h", "ExtInt f1h", "ExtInt f2h", "ExtInt f3h", "ExtInt f4h", "ExtInt f5h", "ExtInt f6h", "ExtInt f7h",
1169 "ExtInt f8h", "ExtInt f9h", "ExtInt fah", "ExtInt fbh", "ExtInt fch", "ExtInt fdh", "ExtInt feh", "ExtInt ffh",
1170 /* software interrups */
1171 "SoftInt 00h", "SoftInt 01h", "SoftInt 02h", "SoftInt 03h", "SoftInt 04h", "SoftInt 05h", "SoftInt 06h", "SoftInt 07h",
1172 "SoftInt 08h", "SoftInt 09h", "SoftInt 0ah", "SoftInt 0bh", "SoftInt 0ch", "SoftInt 0dh", "SoftInt 0eh", "SoftInt 0fh",
1173 "SoftInt 10h", "SoftInt 11h", "SoftInt 12h", "SoftInt 13h", "SoftInt 14h", "SoftInt 15h", "SoftInt 16h", "SoftInt 17h",
1174 "SoftInt 18h", "SoftInt 19h", "SoftInt 1ah", "SoftInt 1bh", "SoftInt 1ch", "SoftInt 1dh", "SoftInt 1eh", "SoftInt 1fh",
1175 "SoftInt 20h", "SoftInt 21h", "SoftInt 22h", "SoftInt 23h", "SoftInt 24h", "SoftInt 25h", "SoftInt 26h", "SoftInt 27h",
1176 "SoftInt 28h", "SoftInt 29h", "SoftInt 2ah", "SoftInt 2bh", "SoftInt 2ch", "SoftInt 2dh", "SoftInt 2eh", "SoftInt 2fh",
1177 "SoftInt 30h", "SoftInt 31h", "SoftInt 32h", "SoftInt 33h", "SoftInt 34h", "SoftInt 35h", "SoftInt 36h", "SoftInt 37h",
1178 "SoftInt 38h", "SoftInt 39h", "SoftInt 3ah", "SoftInt 3bh", "SoftInt 3ch", "SoftInt 3dh", "SoftInt 3eh", "SoftInt 3fh",
1179 "SoftInt 40h", "SoftInt 41h", "SoftInt 42h", "SoftInt 43h", "SoftInt 44h", "SoftInt 45h", "SoftInt 46h", "SoftInt 47h",
1180 "SoftInt 48h", "SoftInt 49h", "SoftInt 4ah", "SoftInt 4bh", "SoftInt 4ch", "SoftInt 4dh", "SoftInt 4eh", "SoftInt 4fh",
1181 "SoftInt 50h", "SoftInt 51h", "SoftInt 52h", "SoftInt 53h", "SoftInt 54h", "SoftInt 55h", "SoftInt 56h", "SoftInt 57h",
1182 "SoftInt 58h", "SoftInt 59h", "SoftInt 5ah", "SoftInt 5bh", "SoftInt 5ch", "SoftInt 5dh", "SoftInt 5eh", "SoftInt 5fh",
1183 "SoftInt 60h", "SoftInt 61h", "SoftInt 62h", "SoftInt 63h", "SoftInt 64h", "SoftInt 65h", "SoftInt 66h", "SoftInt 67h",
1184 "SoftInt 68h", "SoftInt 69h", "SoftInt 6ah", "SoftInt 6bh", "SoftInt 6ch", "SoftInt 6dh", "SoftInt 6eh", "SoftInt 6fh",
1185 "SoftInt 70h", "SoftInt 71h", "SoftInt 72h", "SoftInt 73h", "SoftInt 74h", "SoftInt 75h", "SoftInt 76h", "SoftInt 77h",
1186 "SoftInt 78h", "SoftInt 79h", "SoftInt 7ah", "SoftInt 7bh", "SoftInt 7ch", "SoftInt 7dh", "SoftInt 7eh", "SoftInt 7fh",
1187 "SoftInt 80h", "SoftInt 81h", "SoftInt 82h", "SoftInt 83h", "SoftInt 84h", "SoftInt 85h", "SoftInt 86h", "SoftInt 87h",
1188 "SoftInt 88h", "SoftInt 89h", "SoftInt 8ah", "SoftInt 8bh", "SoftInt 8ch", "SoftInt 8dh", "SoftInt 8eh", "SoftInt 8fh",
1189 "SoftInt 90h", "SoftInt 91h", "SoftInt 92h", "SoftInt 93h", "SoftInt 94h", "SoftInt 95h", "SoftInt 96h", "SoftInt 97h",
1190 "SoftInt 98h", "SoftInt 99h", "SoftInt 9ah", "SoftInt 9bh", "SoftInt 9ch", "SoftInt 9dh", "SoftInt 9eh", "SoftInt 9fh",
1191 "SoftInt a0h", "SoftInt a1h", "SoftInt a2h", "SoftInt a3h", "SoftInt a4h", "SoftInt a5h", "SoftInt a6h", "SoftInt a7h",
1192 "SoftInt a8h", "SoftInt a9h", "SoftInt aah", "SoftInt abh", "SoftInt ach", "SoftInt adh", "SoftInt aeh", "SoftInt afh",
1193 "SoftInt b0h", "SoftInt b1h", "SoftInt b2h", "SoftInt b3h", "SoftInt b4h", "SoftInt b5h", "SoftInt b6h", "SoftInt b7h",
1194 "SoftInt b8h", "SoftInt b9h", "SoftInt bah", "SoftInt bbh", "SoftInt bch", "SoftInt bdh", "SoftInt beh", "SoftInt bfh",
1195 "SoftInt c0h", "SoftInt c1h", "SoftInt c2h", "SoftInt c3h", "SoftInt c4h", "SoftInt c5h", "SoftInt c6h", "SoftInt c7h",
1196 "SoftInt c8h", "SoftInt c9h", "SoftInt cah", "SoftInt cbh", "SoftInt cch", "SoftInt cdh", "SoftInt ceh", "SoftInt cfh",
1197 "SoftInt d0h", "SoftInt d1h", "SoftInt d2h", "SoftInt d3h", "SoftInt d4h", "SoftInt d5h", "SoftInt d6h", "SoftInt d7h",
1198 "SoftInt d8h", "SoftInt d9h", "SoftInt dah", "SoftInt dbh", "SoftInt dch", "SoftInt ddh", "SoftInt deh", "SoftInt dfh",
1199 "SoftInt e0h", "SoftInt e1h", "SoftInt e2h", "SoftInt e3h", "SoftInt e4h", "SoftInt e5h", "SoftInt e6h", "SoftInt e7h",
1200 "SoftInt e8h", "SoftInt e9h", "SoftInt eah", "SoftInt ebh", "SoftInt ech", "SoftInt edh", "SoftInt eeh", "SoftInt efh",
1201 "SoftInt f0h", "SoftInt f1h", "SoftInt f2h", "SoftInt f3h", "SoftInt f4h", "SoftInt f5h", "SoftInt f6h", "SoftInt f7h",
1202 "SoftInt f8h", "SoftInt f9h", "SoftInt fah", "SoftInt fbh", "SoftInt fch", "SoftInt fdh", "SoftInt feh", "SoftInt ffh",
1203 };
1204 if (uExit < RT_ELEMENTS(s_apszNames))
1205 return s_apszNames[uExit];
1206 return NULL;
1207}
1208
1209
1210/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1211static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
1212{
1213 if (*pfHeader)
1214 return;
1215 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
1216 *pfHeader = true;
1217}
1218
1219
1220#define IEMR3INFOTLB_F_ONLY_VALID RT_BIT_32(0)
1221#define IEMR3INFOTLB_F_CHECK RT_BIT_32(1)
1222
1223/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1224static void iemR3InfoTlbPrintSlot(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe,
1225 uint32_t uSlot, uint32_t fFlags)
1226{
1227#ifndef VBOX_VMM_TARGET_ARMV8
1228 uint64_t const uTlbRevision = !(uSlot & 1) ? pTlb->uTlbRevision : pTlb->uTlbRevisionGlobal;
1229#else
1230 uint64_t const uTlbRevision = pTlb->uTlbRevision;
1231#endif
1232 if ((fFlags & IEMR3INFOTLB_F_ONLY_VALID) && (pTlbe->uTag & IEMTLB_REVISION_MASK) != uTlbRevision)
1233 return;
1234
1235 /* The address needs to be sign extended, thus the shifting fun here.*/
1236 RTGCPTR const GCPtr = (RTGCINTPTR)((pTlbe->uTag & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1237 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT);
1238 const char *pszValid = "";
1239#ifndef VBOX_VMM_TARGET_ARMV8
1240 char szTmp[128];
1241 if (fFlags & IEMR3INFOTLB_F_CHECK)
1242 {
1243 uint32_t const fInvSlotG = (uint32_t)!(uSlot & 1) << X86_PTE_BIT_G;
1244 PGMPTWALKFAST WalkFast;
1245 int rc = PGMGstQueryPageFast(pVCpu, GCPtr, 0 /*fFlags - don't check or modify anything */, &WalkFast);
1246 pszValid = szTmp;
1247 if (RT_FAILURE(rc))
1248 switch (rc)
1249 {
1250 case VERR_PAGE_TABLE_NOT_PRESENT:
1251 switch ((WalkFast.fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT)
1252 {
1253 case 1: pszValid = " stale(page-not-present)"; break;
1254 case 2: pszValid = " stale(pd-entry-not-present)"; break;
1255 case 3: pszValid = " stale(pdptr-entry-not-present)"; break;
1256 case 4: pszValid = " stale(pml4-entry-not-present)"; break;
1257 case 5: pszValid = " stale(pml5-entry-not-present)"; break;
1258 default: pszValid = " stale(VERR_PAGE_TABLE_NOT_PRESENT)"; break;
1259 }
1260 break;
1261 default: RTStrPrintf(szTmp, sizeof(szTmp), " stale(rc=%d)", rc); break;
1262 }
1263 else if (WalkFast.GCPhys != pTlbe->GCPhys)
1264 RTStrPrintf(szTmp, sizeof(szTmp), " stale(GCPhys=%RGp)", WalkFast.GCPhys);
1265 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G | X86_PTE_A | X86_PTE_D))
1266 == ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1267 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED))
1268 | fInvSlotG ) )
1269 pszValid = " still-valid";
1270 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G))
1271 == ((pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER)) | fInvSlotG) )
1272 switch ( (~WalkFast.fEffective & (X86_PTE_A | X86_PTE_D))
1273 ^ (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED)) )
1274 {
1275 case X86_PTE_A:
1276 pszValid = WalkFast.fEffective & X86_PTE_A ? " still-valid(accessed-now)" : " still-valid(accessed-no-more)";
1277 break;
1278 case X86_PTE_D:
1279 pszValid = WalkFast.fEffective & X86_PTE_D ? " still-valid(dirty-now)" : " still-valid(dirty-no-more)";
1280 break;
1281 case X86_PTE_D | X86_PTE_A:
1282 RTStrPrintf(szTmp, sizeof(szTmp), " still-valid(%s%s)",
1283 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1284 : WalkFast.fEffective & X86_PTE_D ? "dirty-now" : "dirty-no-more",
1285 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1286 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1287 break;
1288 default: AssertFailed(); break;
1289 }
1290 else
1291 RTStrPrintf(szTmp, sizeof(szTmp), " stale(%s%s%s%s%s)",
1292 (~WalkFast.fEffective & X86_PTE_RW) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) ? ""
1293 : WalkFast.fEffective & X86_PTE_RW ? "writeable-now" : "writable-no-more",
1294 (~WalkFast.fEffective & X86_PTE_US) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) ? ""
1295 : WalkFast.fEffective & X86_PTE_US ? " user-now" : " user-no-more",
1296 (~WalkFast.fEffective & X86_PTE_G) == fInvSlotG ? ""
1297 : WalkFast.fEffective & X86_PTE_G ? " global-now" : " global-no-more",
1298 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1299 : WalkFast.fEffective & X86_PTE_D ? " dirty-now" : " dirty-no-more",
1300 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1301 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1302 }
1303#else
1304 RT_NOREF(pVCpu);
1305#endif
1306
1307 pHlp->pfnPrintf(pHlp, IEMTLB_SLOT_FMT ": %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s%s%s%s/%s%s%s%s/%s %s%s\n",
1308 uSlot,
1309 (pTlbe->uTag & IEMTLB_REVISION_MASK) == uTlbRevision ? "valid "
1310 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
1311 : "expired",
1312 GCPtr, /* -> */
1313 pTlbe->GCPhys, /* / */ pTlbe->pbMappingR3,
1314 /* / */
1315 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
1316 /* */
1317 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "R-" : "RW",
1318 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "-" : "X",
1319 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
1320 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
1321 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER ? "U" : "S",
1322 !(uSlot & 1) ? "-" : "G",
1323 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE ? "4K" : "2M",
1324 /* / */
1325 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
1326 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
1327 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "u" : "-",
1328 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_CODE_PAGE ? "c" : "-",
1329 /* / */
1330 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "N" : "M",
1331 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
1332 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired",
1333 pszValid);
1334}
1335
1336
1337/** Displays one or more TLB slots. */
1338static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1339 uint32_t uSlot, uint32_t cSlots, uint32_t fFlags, bool *pfHeader)
1340{
1341 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
1342 {
1343 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
1344 {
1345 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
1346 cSlots, RT_ELEMENTS(pTlb->aEntries));
1347 cSlots = RT_ELEMENTS(pTlb->aEntries);
1348 }
1349
1350 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1351 while (cSlots-- > 0)
1352 {
1353 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
1354 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &Tlbe, uSlot, fFlags);
1355 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
1356 }
1357 }
1358 else
1359 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
1360 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
1361}
1362
1363
1364/** Displays the TLB slot for the given address. */
1365static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1366 uint64_t uAddress, uint32_t fFlags, bool *pfHeader)
1367{
1368 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1369
1370 uint64_t const uTag = IEMTLB_CALC_TAG_NO_REV(uAddress);
1371#ifdef IEMTLB_TAG_TO_EVEN_INDEX
1372 uint32_t const uSlot = IEMTLB_TAG_TO_EVEN_INDEX(uTag);
1373#else
1374 uint32_t const uSlot = IEMTLB_TAG_TO_INDEX(uTag);
1375#endif
1376 IEMTLBENTRY const TlbeL = pTlb->aEntries[uSlot];
1377#ifndef VBOX_VMM_TARGET_ARMV8
1378 IEMTLBENTRY const TlbeG = pTlb->aEntries[uSlot + 1];
1379#endif
1380 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
1381 TlbeL.uTag == (uTag | pTlb->uTlbRevision) ? "match"
1382 : (TlbeL.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1383 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeL, uSlot, fFlags);
1384
1385#ifndef VBOX_VMM_TARGET_ARMV8
1386 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot + 1,
1387 TlbeG.uTag == (uTag | pTlb->uTlbRevisionGlobal) ? "match"
1388 : (TlbeG.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1389 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeG, uSlot + 1, fFlags);
1390#endif
1391}
1392
1393
1394/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
1395static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
1396{
1397 /*
1398 * This is entirely argument driven.
1399 */
1400 static RTGETOPTDEF const s_aOptions[] =
1401 {
1402 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1403 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1404 { "--check", 'C', RTGETOPT_REQ_NOTHING },
1405 { "all", 'A', RTGETOPT_REQ_NOTHING },
1406 { "--all", 'A', RTGETOPT_REQ_NOTHING },
1407 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1408 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
1409 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1410 { "--only-valid", 'v', RTGETOPT_REQ_NOTHING },
1411 };
1412
1413 RTGETOPTSTATE State;
1414 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1415 AssertRCReturnVoid(rc);
1416
1417 uint32_t cActionArgs = 0;
1418 bool fNeedHeader = true;
1419 bool fAddressMode = true;
1420 uint32_t fFlags = 0;
1421 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1422 PVMCPU pVCpu = pVCpuCall;
1423 if (!pVCpu)
1424 pVCpu = VMMGetCpuById(pVM, 0);
1425
1426 RTGETOPTUNION ValueUnion;
1427 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1428 {
1429 switch (rc)
1430 {
1431 case 'c':
1432 if (ValueUnion.u32 >= pVM->cCpus)
1433 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1434 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1435 {
1436 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1437 fNeedHeader = true;
1438 if (!pVCpuCall || pVCpuCall->idCpu != ValueUnion.u32)
1439 {
1440 pHlp->pfnPrintf(pHlp, "info: Can't check guest PTs when switching to a different VCpu! Targetting %u, on %u.\n",
1441 ValueUnion.u32, pVCpuCall->idCpu);
1442 fFlags &= ~IEMR3INFOTLB_F_CHECK;
1443 }
1444 }
1445 break;
1446
1447 case 'C':
1448 if (!pVCpuCall)
1449 pHlp->pfnPrintf(pHlp, "error: Can't check guest PT when not running on an EMT!\n");
1450 else if (pVCpu != pVCpuCall)
1451 pHlp->pfnPrintf(pHlp, "error: Can't check guest PTs when on a different EMT! Targetting %u, on %u.\n",
1452 pVCpu->idCpu, pVCpuCall->idCpu);
1453 else
1454 fFlags |= IEMR3INFOTLB_F_CHECK;
1455 break;
1456
1457 case 'a':
1458 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1459 ValueUnion.u64, fFlags, &fNeedHeader);
1460 fAddressMode = true;
1461 cActionArgs++;
1462 break;
1463
1464 case 'A':
1465 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1466 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1467 cActionArgs++;
1468 break;
1469
1470 case 'r':
1471 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1472 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, fFlags, &fNeedHeader);
1473 fAddressMode = false;
1474 cActionArgs++;
1475 break;
1476
1477 case 's':
1478 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1479 ValueUnion.u32, 1, fFlags, &fNeedHeader);
1480 fAddressMode = false;
1481 cActionArgs++;
1482 break;
1483
1484 case 'v':
1485 fFlags |= IEMR3INFOTLB_F_ONLY_VALID;
1486 break;
1487
1488 case VINF_GETOPT_NOT_OPTION:
1489 if (fAddressMode)
1490 {
1491 uint64_t uAddr;
1492 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
1493 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1494 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1495 uAddr, fFlags, &fNeedHeader);
1496 else
1497 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
1498 }
1499 else
1500 {
1501 uint32_t uSlot;
1502 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
1503 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1504 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1505 uSlot, 1, fFlags, &fNeedHeader);
1506 else
1507 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
1508 }
1509 cActionArgs++;
1510 break;
1511
1512 case 'h':
1513 pHlp->pfnPrintf(pHlp,
1514 "Usage: info %ctlb [options]\n"
1515 "\n"
1516 "Options:\n"
1517 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1518 " Selects the CPU which TLBs we're looking at. Default: Caller / 0\n"
1519 " -C,--check\n"
1520 " Check valid entries against guest PTs.\n"
1521 " -A, --all, all\n"
1522 " Display all the TLB entries (default if no other args).\n"
1523 " -a<virt>, --address=<virt>\n"
1524 " Shows the TLB entry for the specified guest virtual address.\n"
1525 " -r<slot:count>, --range=<slot:count>\n"
1526 " Shows the TLB entries for the specified slot range.\n"
1527 " -s<slot>,--slot=<slot>\n"
1528 " Shows the given TLB slot.\n"
1529 " -v,--only-valid\n"
1530 " Only show valid TLB entries (TAG, not phys)\n"
1531 "\n"
1532 "Non-options are interpreted according to the last -a, -r or -s option,\n"
1533 "defaulting to addresses if not preceeded by any of those options.\n"
1534 , fITlb ? 'i' : 'd');
1535 return;
1536
1537 default:
1538 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1539 return;
1540 }
1541 }
1542
1543 /*
1544 * If no action taken, we display all (-A) by default.
1545 */
1546 if (!cActionArgs)
1547 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1548 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1549}
1550
1551
1552/**
1553 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
1554 */
1555static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1556{
1557 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
1558}
1559
1560
1561/**
1562 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
1563 */
1564static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1565{
1566 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
1567}
1568
1569
1570#ifdef IEM_WITH_TLB_TRACE
1571/**
1572 * @callback_method_impl{FNDBGFINFOARGVINT, tlbtrace}
1573 */
1574static DECLCALLBACK(void) iemR3InfoTlbTrace(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1575{
1576 /*
1577 * Parse arguments.
1578 */
1579 static RTGETOPTDEF const s_aOptions[] =
1580 {
1581 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1582 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1583 { "--last", 'l', RTGETOPT_REQ_UINT32 },
1584 { "--limit", 'l', RTGETOPT_REQ_UINT32 },
1585 { "--stop-at-global-flush", 'g', RTGETOPT_REQ_NOTHING },
1586 { "--resolve-rip", 'r', RTGETOPT_REQ_NOTHING },
1587 };
1588
1589 RTGETOPTSTATE State;
1590 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1591 AssertRCReturnVoid(rc);
1592
1593 uint32_t cLimit = UINT32_MAX;
1594 bool fStopAtGlobalFlush = false;
1595 bool fResolveRip = false;
1596 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1597 PVMCPU pVCpu = pVCpuCall;
1598 if (!pVCpu)
1599 pVCpu = VMMGetCpuById(pVM, 0);
1600
1601 RTGETOPTUNION ValueUnion;
1602 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1603 {
1604 switch (rc)
1605 {
1606 case 'c':
1607 if (ValueUnion.u32 >= pVM->cCpus)
1608 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1609 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1610 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1611 break;
1612
1613 case 'l':
1614 cLimit = ValueUnion.u32;
1615 break;
1616
1617 case 'g':
1618 fStopAtGlobalFlush = true;
1619 break;
1620
1621 case 'r':
1622 fResolveRip = true;
1623 break;
1624
1625 case 'h':
1626 pHlp->pfnPrintf(pHlp,
1627 "Usage: info tlbtrace [options] [n]\n"
1628 "\n"
1629 "Options:\n"
1630 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1631 " Selects the CPU which TLB trace we're looking at. Default: Caller / 0\n"
1632 " [n], -l<n>, --last=<n>\n"
1633 " Limit display to the last N entries. Default: all\n"
1634 " -g, --stop-at-global-flush\n"
1635 " Stop after the first global flush entry.\n"
1636 " -r, --resolve-rip\n"
1637 " Resolve symbols for the flattened RIP addresses.\n"
1638 );
1639 return;
1640
1641 case VINF_GETOPT_NOT_OPTION:
1642 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cLimit);
1643 if (RT_SUCCESS(rc))
1644 break;
1645 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
1646 return;
1647
1648 default:
1649 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1650 return;
1651 }
1652 }
1653
1654 /*
1655 * Get the details.
1656 */
1657 AssertReturnVoid(pVCpu);
1658 Assert(pVCpu->iem.s.cTlbTraceEntriesShift <= 28);
1659 uint32_t idx = pVCpu->iem.s.idxTlbTraceEntry;
1660 uint32_t const cShift = RT_MIN(pVCpu->iem.s.cTlbTraceEntriesShift, 28);
1661 uint32_t const fMask = RT_BIT_32(cShift) - 1;
1662 uint32_t cLeft = RT_MIN(RT_MIN(idx, RT_BIT_32(cShift)), cLimit);
1663 PCIEMTLBTRACEENTRY paEntries = pVCpu->iem.s.paTlbTraceEntries;
1664 if (cLeft && paEntries)
1665 {
1666 /*
1667 * Display the entries.
1668 */
1669 pHlp->pfnPrintf(pHlp, "TLB Trace for CPU %u:\n", pVCpu->idCpu);
1670 while (cLeft-- > 0)
1671 {
1672 PCIEMTLBTRACEENTRY const pCur = &paEntries[--idx & fMask];
1673 const char *pszSymbol = "";
1674 union
1675 {
1676 RTDBGSYMBOL Symbol;
1677 char ach[sizeof(RTDBGSYMBOL) + 32];
1678 } uBuf;
1679 if (fResolveRip)
1680 {
1681 RTGCINTPTR offDisp = 0;
1682 DBGFADDRESS Addr;
1683 rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, pCur->rip),
1684 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL
1685 | RTDBGSYMADDR_FLAGS_SKIP_ABS
1686 | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1687 &offDisp, &uBuf.Symbol, NULL);
1688 if (RT_SUCCESS(rc))
1689 {
1690 /* Add displacement. */
1691 if (offDisp)
1692 {
1693 size_t const cchName = strlen(uBuf.Symbol.szName);
1694 char * const pszEndName = &uBuf.Symbol.szName[cchName];
1695 size_t const cbLeft = sizeof(uBuf) - sizeof(uBuf.Symbol) + sizeof(uBuf.Symbol.szName) - cchName;
1696 if (offDisp > 0)
1697 RTStrPrintf(pszEndName, cbLeft, "+%#1RGv", offDisp);
1698 else
1699 RTStrPrintf(pszEndName, cbLeft, "-%#1RGv", -offDisp);
1700 }
1701
1702 /* Put a space before it. */
1703 AssertCompile(RTASSERT_OFFSET_OF(RTDBGSYMBOL, szName) > 0);
1704 char *pszName = uBuf.Symbol.szName;
1705 *--pszName = ' ';
1706 pszSymbol = pszName;
1707 }
1708 }
1709 static const char *s_apszTlbType[2] = { "code", "data" };
1710 static const char *s_apszScanType[4] = { "skipped", "global", "non-global", "both" };
1711 switch (pCur->enmType)
1712 {
1713 case kIemTlbTraceType_InvlPg:
1714 pHlp->pfnPrintf(pHlp, "%u: %016RX64 invlpg %RGv slot=" IEMTLB_SLOT_FMT "%s\n", idx, pCur->rip,
1715 pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param), pszSymbol);
1716 break;
1717 case kIemTlbTraceType_EvictSlot:
1718 pHlp->pfnPrintf(pHlp, "%u: %016RX64 evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1719 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1720 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1721 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1722 pCur->u64Param2, pszSymbol);
1723 break;
1724 case kIemTlbTraceType_LargeEvictSlot:
1725 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1726 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1727 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1728 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1729 pCur->u64Param2, pszSymbol);
1730 break;
1731 case kIemTlbTraceType_LargeScan:
1732 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large scan %s %s%s\n", idx, pCur->rip, s_apszTlbType[pCur->bParam & 1],
1733 s_apszScanType[pCur->u32Param & 3], pszSymbol);
1734 break;
1735
1736 case kIemTlbTraceType_Flush:
1737 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64%s\n", idx, pCur->rip,
1738 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pszSymbol);
1739 break;
1740 case kIemTlbTraceType_FlushGlobal:
1741 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64 grev=%#RX64%s\n", idx, pCur->rip,
1742 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pCur->u64Param2, pszSymbol);
1743 if (fStopAtGlobalFlush)
1744 return;
1745 break;
1746 case kIemTlbTraceType_Load:
1747 case kIemTlbTraceType_LoadGlobal:
1748 pHlp->pfnPrintf(pHlp, "%u: %016RX64 %cload %s %RGv slot=" IEMTLB_SLOT_FMT " gcphys=%RGp fTlb=%#RX32%s\n",
1749 idx, pCur->rip,
1750 pCur->enmType == kIemTlbTraceType_LoadGlobal ? 'g' : 'l', s_apszTlbType[pCur->bParam & 1],
1751 pCur->u64Param,
1752 (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param)
1753 | (pCur->enmType == kIemTlbTraceType_LoadGlobal),
1754 (RTGCPTR)pCur->u64Param2, pCur->u32Param, pszSymbol);
1755 break;
1756
1757 case kIemTlbTraceType_Load_Cr0:
1758 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr0 %08RX64 (was %08RX64)%s\n",
1759 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1760 break;
1761 case kIemTlbTraceType_Load_Cr3:
1762 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr3 %016RX64 (was %016RX64)%s\n",
1763 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1764 break;
1765 case kIemTlbTraceType_Load_Cr4:
1766 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr4 %08RX64 (was %08RX64)%s\n",
1767 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1768 break;
1769 case kIemTlbTraceType_Load_Efer:
1770 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load efer %016RX64 (was %016RX64)%s\n",
1771 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1772 break;
1773
1774 case kIemTlbTraceType_Irq:
1775 pHlp->pfnPrintf(pHlp, "%u: %016RX64 irq %#04x flags=%#x eflboth=%#RX64%s\n",
1776 idx, pCur->rip, pCur->bParam, pCur->u32Param,
1777 pCur->u64Param & ((RT_BIT_64(CPUMX86EFLAGS_HW_BITS) - 1) | CPUMX86EFLAGS_INT_MASK_64),
1778 pszSymbol);
1779 break;
1780 case kIemTlbTraceType_Xcpt:
1781 if (pCur->u32Param & IEM_XCPT_FLAGS_CR2)
1782 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x cr2=%RX64%s\n",
1783 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pCur->u64Param2, pszSymbol);
1784 else if (pCur->u32Param & IEM_XCPT_FLAGS_ERR)
1785 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x%s\n",
1786 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pszSymbol);
1787 else
1788 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x%s\n",
1789 idx, pCur->rip, pCur->bParam, pCur->u32Param, pszSymbol);
1790 break;
1791 case kIemTlbTraceType_IRet:
1792 pHlp->pfnPrintf(pHlp, "%u: %016RX64 iret cs:rip=%04x:%016RX64 efl=%08RX32%s\n",
1793 idx, pCur->rip, pCur->u32Param, pCur->u64Param, (uint32_t)pCur->u64Param2, pszSymbol);
1794 break;
1795
1796 case kIemTlbTraceType_Tb_Compile:
1797 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb comp GCPhysPc=%012RX64%s\n",
1798 idx, pCur->rip, pCur->u64Param, pszSymbol);
1799 break;
1800 case kIemTlbTraceType_Tb_Exec_Threaded:
1801 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb thrd GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1802 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1803 break;
1804 case kIemTlbTraceType_Tb_Exec_Native:
1805 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb n8ve GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1806 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1807 break;
1808
1809 case kIemTlbTraceType_User0:
1810 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user0 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1811 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1812 break;
1813 case kIemTlbTraceType_User1:
1814 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user1 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1815 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1816 break;
1817 case kIemTlbTraceType_User2:
1818 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user2 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1819 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1820 break;
1821 case kIemTlbTraceType_User3:
1822 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user3 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1823 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1824 break;
1825
1826 case kIemTlbTraceType_Invalid:
1827 pHlp->pfnPrintf(pHlp, "%u: Invalid!\n");
1828 break;
1829 }
1830 }
1831 }
1832 else
1833 pHlp->pfnPrintf(pHlp, "No trace entries to display\n");
1834}
1835#endif /* IEM_WITH_TLB_TRACE */
1836
1837#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1838
1839/**
1840 * Get get compile time flat PC for the TB.
1841 */
1842DECL_FORCE_INLINE(RTGCPTR) iemR3GetTbFlatPc(PCIEMTB pTb)
1843{
1844#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
1845 if (pTb->fFlags & IEMTB_F_TYPE_NATIVE)
1846 {
1847 PCIEMTBDBG const pDbgInfo = pTb->pDbgInfo;
1848 return pDbgInfo ? pDbgInfo->FlatPc : RTGCPTR_MAX;
1849 }
1850#endif
1851 return pTb->FlatPc;
1852}
1853
1854
1855/**
1856 * @callback_method_impl{FNDBGFINFOARGVINT, tb}
1857 */
1858static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1859{
1860 /*
1861 * Parse arguments.
1862 */
1863 static RTGETOPTDEF const s_aOptions[] =
1864 {
1865 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1866 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1867 { "--addr", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1868 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1869 { "--phys", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1870 { "--physical", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1871 { "--phys-addr", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1872 { "--phys-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1873 { "--physical-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1874 { "--flags", 'f', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1875 { "--tb", 't', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1876 { "--tb-id", 't', RTGETOPT_REQ_UINT32 },
1877 };
1878
1879 RTGETOPTSTATE State;
1880 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1881 AssertRCReturnVoid(rc);
1882
1883 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
1884 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
1885 RTGCPHYS GCPhysPc = NIL_RTGCPHYS;
1886 RTGCPHYS GCVirt = NIL_RTGCPTR;
1887 uint32_t fFlags = UINT32_MAX;
1888 uint32_t idTb = UINT32_MAX;
1889
1890 RTGETOPTUNION ValueUnion;
1891 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1892 {
1893 switch (rc)
1894 {
1895 case 'c':
1896 if (ValueUnion.u32 >= pVM->cCpus)
1897 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1898 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1899 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1900 break;
1901
1902 case 'a':
1903 GCVirt = ValueUnion.u64;
1904 GCPhysPc = NIL_RTGCPHYS;
1905 idTb = UINT32_MAX;
1906 break;
1907
1908 case 'p':
1909 GCVirt = NIL_RTGCPHYS;
1910 GCPhysPc = ValueUnion.u64;
1911 idTb = UINT32_MAX;
1912 break;
1913
1914 case 'f':
1915 fFlags = ValueUnion.u32;
1916 break;
1917
1918 case 't':
1919 GCVirt = NIL_RTGCPHYS;
1920 GCPhysPc = NIL_RTGCPHYS;
1921 idTb = ValueUnion.u32;
1922 break;
1923
1924 case VINF_GETOPT_NOT_OPTION:
1925 {
1926 if ( (ValueUnion.psz[0] == 'T' || ValueUnion.psz[0] == 't')
1927 && (ValueUnion.psz[1] == 'B' || ValueUnion.psz[1] == 'b')
1928 && ValueUnion.psz[2] == '#')
1929 {
1930 rc = RTStrToUInt32Full(&ValueUnion.psz[3], 0, &idTb);
1931 if (RT_SUCCESS(rc))
1932 {
1933 GCVirt = NIL_RTGCPHYS;
1934 GCPhysPc = NIL_RTGCPHYS;
1935 break;
1936 }
1937 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to TD ID: %Rrc\n", ValueUnion.psz, rc);
1938 }
1939 else
1940 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1941 return;
1942 }
1943
1944 case 'h':
1945 pHlp->pfnPrintf(pHlp,
1946 "Usage: info tb [options]\n"
1947 "\n"
1948 "Options:\n"
1949 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1950 " Selects the CPU which TBs we're looking at. Default: Caller / 0\n"
1951 " -a<virt>, --address=<virt>\n"
1952 " Shows the TB for the specified guest virtual address.\n"
1953 " -p<phys>, --phys=<phys>, --phys-addr=<phys>\n"
1954 " Shows the TB for the specified guest physical address.\n"
1955 " -t<id>, --tb=<id>, --tb-id=<id>, TD#<id>\n"
1956 " Show the TB specified by the identifier/number (from tbtop).\n"
1957 " -f<flags>,--flags=<flags>\n"
1958 " The TB flags value (hex) to use when looking up the TB.\n"
1959 "\n"
1960 "The default is to use CS:RIP and derive flags from the CPU mode.\n");
1961 return;
1962
1963 default:
1964 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1965 return;
1966 }
1967 }
1968
1969 /* Currently, only do work on the same EMT. */
1970 if (pVCpu != pVCpuThis)
1971 {
1972 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
1973 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
1974 return;
1975 }
1976
1977 /*
1978 * Defaults.
1979 */
1980 if (GCPhysPc == NIL_RTGCPHYS && idTb == UINT32_MAX)
1981 {
1982 if (GCVirt == NIL_RTGCPTR)
1983 GCVirt = CPUMGetGuestFlatPC(pVCpu);
1984 rc = PGMPhysGCPtr2GCPhys(pVCpu, GCVirt, &GCPhysPc);
1985 if (RT_FAILURE(rc))
1986 {
1987 pHlp->pfnPrintf(pHlp, "Failed to convert %%%RGv to an guest physical address: %Rrc\n", GCVirt, rc);
1988 return;
1989 }
1990 }
1991 if (fFlags == UINT32_MAX && idTb == UINT32_MAX)
1992 {
1993 /* Note! This is duplicating code in IEMAllThrdRecompiler. */
1994 fFlags = iemCalcExecFlags(pVCpu);
1995 if (pVM->cCpus == 1)
1996 fFlags |= IEM_F_X86_DISREGARD_LOCK;
1997 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
1998 fFlags |= IEMTB_F_INHIBIT_SHADOW;
1999 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
2000 fFlags |= IEMTB_F_INHIBIT_NMI;
2001 if ((IEM_F_MODE_CPUMODE_MASK & fFlags) != IEMMODE_64BIT)
2002 {
2003 int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
2004 if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
2005 fFlags |= IEMTB_F_CS_LIM_CHECKS;
2006 }
2007 }
2008
2009 PCIEMTB pTb;
2010 if (idTb == UINT32_MAX)
2011 {
2012 /*
2013 * Do the lookup...
2014 *
2015 * Note! This is also duplicating code in IEMAllThrdRecompiler. We don't
2016 * have much choice since we don't want to increase use counters and
2017 * trigger native recompilation.
2018 */
2019 fFlags &= IEMTB_F_KEY_MASK;
2020 IEMTBCACHE const * const pTbCache = pVCpu->iem.s.pTbCacheR3;
2021 uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
2022 pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
2023 while (pTb)
2024 {
2025 if (pTb->GCPhysPc == GCPhysPc)
2026 {
2027 if ((pTb->fFlags & IEMTB_F_KEY_MASK) == fFlags)
2028 {
2029 /// @todo if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
2030 break;
2031 }
2032 }
2033 pTb = pTb->pNext;
2034 }
2035 if (!pTb)
2036 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x - no TB found on #%u\n", GCPhysPc, fFlags, pVCpu->idCpu);
2037 }
2038 else
2039 {
2040 /*
2041 * Use the TB ID for indexing.
2042 */
2043 pTb = NULL;
2044 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2045 if (pTbAllocator)
2046 {
2047 size_t const idxTbChunk = idTb / pTbAllocator->cTbsPerChunk;
2048 size_t const idxTbInChunk = idTb % pTbAllocator->cTbsPerChunk;
2049 if (idxTbChunk < pTbAllocator->cAllocatedChunks)
2050 pTb = &pTbAllocator->aChunks[idxTbChunk].paTbs[idxTbInChunk];
2051 else
2052 pHlp->pfnPrintf(pHlp, "Invalid TB ID: %u (%#x)\n", idTb, idTb);
2053 }
2054 }
2055
2056 if (pTb)
2057 {
2058 /*
2059 * Disassemble according to type.
2060 */
2061 size_t const idxTbChunk = pTb->idxAllocChunk;
2062 size_t const idxTbNo = (pTb - &pVCpu->iem.s.pTbAllocatorR3->aChunks[idxTbChunk].paTbs[0])
2063 + idxTbChunk * pVCpu->iem.s.pTbAllocatorR3->cTbsPerChunk;
2064 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2065 {
2066# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2067 case IEMTB_F_TYPE_NATIVE:
2068 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - native\n",
2069 GCPhysPc, iemR3GetTbFlatPc(pTb), fFlags, pVCpu->idCpu, idxTbNo, pTb);
2070 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2071 break;
2072# endif
2073
2074 case IEMTB_F_TYPE_THREADED:
2075 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - threaded\n",
2076 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb);
2077 iemThreadedDisassembleTb(pTb, pHlp);
2078 break;
2079
2080 default:
2081 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - ??? %#x\n",
2082 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb, pTb->fFlags);
2083 break;
2084 }
2085 }
2086}
2087
2088
2089/**
2090 * @callback_method_impl{FNDBGFINFOARGVINT, tbtop}
2091 */
2092static DECLCALLBACK(void) iemR3InfoTbTop(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
2093{
2094 /*
2095 * Parse arguments.
2096 */
2097 static RTGETOPTDEF const s_aOptions[] =
2098 {
2099 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
2100 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
2101 { "--dis", 'd', RTGETOPT_REQ_NOTHING },
2102 { "--disas", 'd', RTGETOPT_REQ_NOTHING },
2103 { "--disasm", 'd', RTGETOPT_REQ_NOTHING },
2104 { "--disassemble", 'd', RTGETOPT_REQ_NOTHING },
2105 { "--no-dis", 'D', RTGETOPT_REQ_NOTHING },
2106 { "--no-disas", 'D', RTGETOPT_REQ_NOTHING },
2107 { "--no-disasm", 'D', RTGETOPT_REQ_NOTHING },
2108 { "--no-disassemble", 'D', RTGETOPT_REQ_NOTHING },
2109 { "--most-freq", 'f', RTGETOPT_REQ_NOTHING },
2110 { "--most-frequent", 'f', RTGETOPT_REQ_NOTHING },
2111 { "--most-frequently", 'f', RTGETOPT_REQ_NOTHING },
2112 { "--most-frequently-used", 'f', RTGETOPT_REQ_NOTHING },
2113 { "--most-recent", 'r', RTGETOPT_REQ_NOTHING },
2114 { "--most-recently", 'r', RTGETOPT_REQ_NOTHING },
2115 { "--most-recently-used", 'r', RTGETOPT_REQ_NOTHING },
2116 { "--count", 'n', RTGETOPT_REQ_UINT32 },
2117 };
2118
2119 RTGETOPTSTATE State;
2120 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
2121 AssertRCReturnVoid(rc);
2122
2123 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
2124 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
2125 enum { kTbTop_MostFrequentlyUsed, kTbTop_MostRececentlyUsed }
2126 enmTop = kTbTop_MostFrequentlyUsed;
2127 bool fDisassemble = false;
2128 uint32_t const cTopDefault = 64;
2129 uint32_t const cTopMin = 1;
2130 uint32_t const cTopMax = 1024;
2131 uint32_t cTop = cTopDefault;
2132
2133 RTGETOPTUNION ValueUnion;
2134 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
2135 {
2136 switch (rc)
2137 {
2138 case 'c':
2139 if (ValueUnion.u32 >= pVM->cCpus)
2140 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
2141 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
2142 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
2143 break;
2144
2145 case 'd':
2146 fDisassemble = true;
2147 break;
2148
2149 case 'D':
2150 fDisassemble = true;
2151 break;
2152
2153 case 'f':
2154 enmTop = kTbTop_MostFrequentlyUsed;
2155 break;
2156
2157 case 'r':
2158 enmTop = kTbTop_MostRececentlyUsed;
2159 break;
2160
2161 case VINF_GETOPT_NOT_OPTION:
2162 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cTop);
2163 if (RT_FAILURE(rc))
2164 {
2165 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
2166 return;
2167 }
2168 ValueUnion.u32 = cTop;
2169 RT_FALL_THROUGH();
2170 case 'n':
2171 if (!ValueUnion.u32)
2172 cTop = cTopDefault;
2173 else
2174 {
2175 cTop = RT_MAX(RT_MIN(ValueUnion.u32, cTopMax), cTopMin);
2176 if (cTop != ValueUnion.u32)
2177 pHlp->pfnPrintf(pHlp, "warning: adjusted %u to %u (valid range: [%u..%u], 0 for default (%d))",
2178 ValueUnion.u32, cTop, cTopMin, cTopMax, cTopDefault);
2179 }
2180 break;
2181
2182 case 'h':
2183 pHlp->pfnPrintf(pHlp,
2184 "Usage: info tbtop [options]\n"
2185 "\n"
2186 "Options:\n"
2187 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
2188 " Selects the CPU which TBs we're looking at. Default: Caller / 0\n"
2189 " -d, --dis[as[m]], --disassemble\n"
2190 " Show full TB disassembly.\n"
2191 " -D, --no-dis[as[m]], --no-disassemble\n"
2192 " Do not show TB diassembly. The default.\n"
2193 " -f, --most-freq[ent[ly[-used]]]\n"
2194 " Shows the most frequently used TBs (IEMTB::cUsed). The default.\n"
2195 " -r, --most-recent[ly[-used]]\n"
2196 " Shows the most recently used TBs (IEMTB::msLastUsed).\n"
2197 " -n<num>, --count=<num>\n"
2198 " The number of TBs to display. Default: %u\n"
2199 " This is also what non-option arguments will be taken as.\n"
2200 , cTopDefault);
2201 return;
2202
2203 default:
2204 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
2205 return;
2206 }
2207 }
2208
2209 /* Currently, only do work on the same EMT. */
2210 if (pVCpu != pVCpuThis)
2211 {
2212 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
2213 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
2214 return;
2215 }
2216
2217 /*
2218 * Collect the data by scanning the TB allocation map.
2219 */
2220 struct IEMTBTOPENTRY
2221 {
2222 /** Pointer to the translation block. */
2223 PCIEMTB pTb;
2224 /** The sorting key. */
2225 uint64_t uSortKey;
2226 } aTop[cTopMax] = { { NULL, 0 }, };
2227 uint32_t cValid = 0;
2228 PIEMTBALLOCATOR pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2229 if (pTbAllocator)
2230 {
2231 uint32_t const cTbsPerChunk = pTbAllocator->cTbsPerChunk;
2232 for (uint32_t iChunk = 0; iChunk < pTbAllocator->cAllocatedChunks; iChunk++)
2233 {
2234 for (uint32_t iTb = 0; iTb < cTbsPerChunk; iTb++)
2235 {
2236 PCIEMTB const pTb = &pTbAllocator->aChunks[iChunk].paTbs[iTb];
2237 AssertContinue(pTb);
2238 if (pTb->fFlags & IEMTB_F_TYPE_MASK)
2239 {
2240 /* Extract and compose the sort key. */
2241 uint64_t const uSortKey = enmTop == kTbTop_MostFrequentlyUsed
2242 ? RT_MAKE_U64(pTb->msLastUsed, pTb->cUsed)
2243 : RT_MAKE_U64(pTb->cUsed, pTb->msLastUsed);
2244
2245 /*
2246 * Discard the key if it's smaller than the smallest in the table when it is full.
2247 */
2248 if ( cValid >= cTop
2249 && uSortKey <= aTop[cTop - 1].uSortKey)
2250 { /* discard it */ }
2251 else
2252 {
2253 /*
2254 * Do binary search to find the insert location
2255 */
2256 uint32_t idx;
2257 if (cValid > 0)
2258 {
2259 uint32_t idxEnd = cValid;
2260 uint32_t idxStart = 0;
2261 idx = cValid / 2;
2262 for (;;)
2263 {
2264 if (uSortKey > aTop[idx].uSortKey)
2265 {
2266 if (idx > idxStart)
2267 idxEnd = idx;
2268 else
2269 break;
2270 }
2271 else if (uSortKey < aTop[idx].uSortKey)
2272 {
2273 idx += 1;
2274 if (idx < idxEnd)
2275 idxStart = idx;
2276 else
2277 break;
2278 }
2279 else
2280 {
2281 do
2282 idx++;
2283 while (idx < cValid && uSortKey == aTop[idx].uSortKey);
2284 break;
2285 }
2286 idx = idxStart + (idxEnd - idxStart) / 2;
2287 }
2288 AssertContinue(idx < RT_ELEMENTS(aTop));
2289
2290 /*
2291 * Shift entries as needed.
2292 */
2293 if (cValid >= cTop)
2294 {
2295 if (idx != cTop - 1U)
2296 memmove(&aTop[idx + 1], &aTop[idx], (cTop - idx - 1) * sizeof(aTop[0]));
2297 }
2298 else
2299 {
2300 if (idx != cValid)
2301 memmove(&aTop[idx + 1], &aTop[idx], (cValid - idx) * sizeof(aTop[0]));
2302 cValid++;
2303 }
2304 }
2305 else
2306 {
2307 /* Special case: The first insertion. */
2308 cValid = 1;
2309 idx = 0;
2310 }
2311
2312 /*
2313 * Fill in the new entry.
2314 */
2315 aTop[idx].uSortKey = uSortKey;
2316 aTop[idx].pTb = pTb;
2317 }
2318 }
2319 }
2320 }
2321 }
2322
2323 /*
2324 * Display the result.
2325 */
2326 if (cTop > cValid)
2327 cTop = cValid;
2328 pHlp->pfnPrintf(pHlp, "Displaying the top %u TBs for CPU #%u ordered by %s:\n",
2329 cTop, pVCpu->idCpu, enmTop == kTbTop_MostFrequentlyUsed ? "cUsed" : "msLastUsed");
2330 if (fDisassemble)
2331 pHlp->pfnPrintf(pHlp, "================================================================================\n");
2332
2333 for (uint32_t idx = 0; idx < cTop; idx++)
2334 {
2335 if (fDisassemble && idx)
2336 pHlp->pfnPrintf(pHlp, "\n------------------------------- %u -------------------------------\n", idx);
2337
2338 PCIEMTB const pTb = aTop[idx].pTb;
2339 size_t const idxTbChunk = pTb->idxAllocChunk;
2340 Assert(idxTbChunk < pTbAllocator->cAllocatedChunks);
2341 size_t const idxTbNo = (pTb - &pTbAllocator->aChunks[idxTbChunk].paTbs[0])
2342 + idxTbChunk * pTbAllocator->cTbsPerChunk;
2343 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2344 {
2345# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2346 case IEMTB_F_TYPE_NATIVE:
2347 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - native\n",
2348 idxTbNo, pTb->GCPhysPc, iemR3GetTbFlatPc(pTb), pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2349 if (fDisassemble)
2350 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2351 break;
2352# endif
2353
2354 case IEMTB_F_TYPE_THREADED:
2355 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - threaded\n",
2356 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2357 if (fDisassemble)
2358 iemThreadedDisassembleTb(pTb, pHlp);
2359 break;
2360
2361 default:
2362 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - ???\n",
2363 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2364 break;
2365 }
2366 }
2367}
2368
2369#endif /* VBOX_WITH_IEM_RECOMPILER && !VBOX_VMM_TARGET_ARMV8 */
2370
2371
2372#ifdef VBOX_WITH_DEBUGGER
2373
2374/** @callback_method_impl{FNDBGCCMD,
2375 * Implements the '.alliem' command. }
2376 */
2377static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2378{
2379 VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
2380 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
2381 if (pVCpu)
2382 {
2383 VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAllGlobal, 1, pVCpu);
2384 return VINF_SUCCESS;
2385 }
2386 RT_NOREF(paArgs, cArgs);
2387 return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
2388}
2389
2390
2391/**
2392 * Called by IEMR3Init to register debugger commands.
2393 */
2394static void iemR3RegisterDebuggerCommands(void)
2395{
2396 /*
2397 * Register debugger commands.
2398 */
2399 static DBGCCMD const s_aCmds[] =
2400 {
2401 {
2402 /* .pszCmd = */ "iemflushtlb",
2403 /* .cArgsMin = */ 0,
2404 /* .cArgsMax = */ 0,
2405 /* .paArgDescs = */ NULL,
2406 /* .cArgDescs = */ 0,
2407 /* .fFlags = */ 0,
2408 /* .pfnHandler = */ iemR3DbgFlushTlbs,
2409 /* .pszSyntax = */ "",
2410 /* .pszDescription = */ "Flushed the code and data TLBs"
2411 },
2412 };
2413
2414 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
2415 AssertLogRelRC(rc);
2416}
2417
2418#endif /* VBOX_WITH_DEBUGGER */
2419
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette