source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp@ 107227

Last change on this file was r107227, checked in by vboxsync, 6 weeks ago

VMM: Cleaning up ARMv8 / x86 split. jiraref:VBP-1470

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 146.6 KB
1/* $Id: IEMR3.cpp 107227 2024-12-04 15:20:14Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/dbgf.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/ssm.h>
39/** @todo this isn't sustainable. */
40#if defined(VBOX_VMM_TARGET_ARMV8)
41# include "IEMInternal-armv8.h"
42#else
43# include "IEMInternal.h"
44#endif
45#include <VBox/vmm/vm.h>
46#include <VBox/vmm/vmapi.h>
47#include <VBox/err.h>
48#ifdef VBOX_WITH_DEBUGGER
49# include <VBox/dbg.h>
50#endif
51
52#include <iprt/assert.h>
53#include <iprt/getopt.h>
54#ifdef IEM_WITH_TLB_TRACE
55# include <iprt/mem.h>
56#endif
57#include <iprt/string.h>
58
59#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
60# include "IEMN8veRecompiler.h"
61# include "IEMThreadedFunctions.h"
62# include "IEMInline.h"
63#endif
64
65
66/*********************************************************************************************************************************
67* Internal Functions *
68*********************************************************************************************************************************/
69static FNDBGFINFOARGVINT iemR3InfoITlb;
70static FNDBGFINFOARGVINT iemR3InfoDTlb;
71#ifdef IEM_WITH_TLB_TRACE
72static FNDBGFINFOARGVINT iemR3InfoTlbTrace;
73#endif
74#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
75static FNDBGFINFOARGVINT iemR3InfoTb;
76static FNDBGFINFOARGVINT iemR3InfoTbTop;
77#endif
78#ifdef VBOX_WITH_DEBUGGER
79static void iemR3RegisterDebuggerCommands(void);
80#endif
81
82
83#if !defined(VBOX_VMM_TARGET_ARMV8)
84static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
85{
86 switch (enmTargetCpu)
87 {
88#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
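    /* The '+ (sizeof("IEMTARGETCPU_") - 1)' part advances past the common "IEMTARGETCPU_" prefix of the
       stringified enum name, so the cases below return just the suffix (e.g. "8086", "PPRO"). */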
89 CASE_RET_STR(IEMTARGETCPU_8086);
90 CASE_RET_STR(IEMTARGETCPU_V20);
91 CASE_RET_STR(IEMTARGETCPU_186);
92 CASE_RET_STR(IEMTARGETCPU_286);
93 CASE_RET_STR(IEMTARGETCPU_386);
94 CASE_RET_STR(IEMTARGETCPU_486);
95 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
96 CASE_RET_STR(IEMTARGETCPU_PPRO);
97 CASE_RET_STR(IEMTARGETCPU_CURRENT);
98#undef CASE_RET_STR
99 default: return "Unknown";
100 }
101}
102#endif
103
104
105#if defined(RT_ARCH_ARM64) && defined(_MSC_VER)
106# pragma warning(disable:4883) /* profile build: IEMR3.cpp(114) : warning C4883: 'IEMR3Init': function size suppresses optimizations*/
107#endif
108
109/**
110 * Initializes the interpreted execution manager.
111 *
112 * This must be called after CPUM as we're querying information from CPUM about
113 * the guest and host CPUs.
114 *
115 * @returns VBox status code.
116 * @param pVM The cross context VM structure.
117 */
118VMMR3DECL(int) IEMR3Init(PVM pVM)
119{
120 /*
121 * Read configuration.
122 */
123#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
124 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
125 int rc;
126#endif
127
128#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
129 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
130 * Controls whether the custom VBox specific CPUID host call interface is
131 * enabled or not. */
132# ifdef DEBUG_bird
133 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
134# else
135 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
136# endif
137 AssertLogRelRCReturn(rc, rc);
138#endif
139
140#ifdef VBOX_WITH_IEM_RECOMPILER
141 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
142 * Max number of TBs per EMT. */
143 uint32_t cMaxTbs = 0;
144 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
145 AssertLogRelRCReturn(rc, rc);
146 if (cMaxTbs < _16K || cMaxTbs > _8M)
147 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
148 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
149
150 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32768}
151 * Initial (minimum) number of TBs per EMT in ring-3. */
152 uint32_t cInitialTbs = 0;
153 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
154 AssertLogRelRCReturn(rc, rc);
155 if (cInitialTbs < _16K || cInitialTbs > _8M)
156 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
157 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
158
159 /* Check that the two values make sense together. Expect user/api to do
160 the right thing or get lost. */
161 if (cInitialTbs > cMaxTbs)
162 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
163 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
164 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
165
166 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
167 * Max executable memory for recompiled code per EMT. */
168 uint64_t cbMaxExec = 0;
169 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
170 AssertLogRelRCReturn(rc, rc);
171 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
172 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
173 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
174 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
175
176 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
177 * The executable memory allocator chunk size. */
178 uint32_t cbChunkExec = 0;
179 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
180 AssertLogRelRCReturn(rc, rc);
181 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
182 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
183 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
184 cbChunkExec, cbChunkExec, _1M, _256M);
185
186 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
187 * The initial executable memory allocator size (per EMT). The value is
188 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
189 uint64_t cbInitialExec = 0;
190 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
191 AssertLogRelRCReturn(rc, rc);
192 if (cbInitialExec > cbMaxExec)
193 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
194 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
195 cbInitialExec, cbInitialExec, cbMaxExec);
196
197 /** @cfgm{/IEM/NativeRecompileAtUsedCount, uint32_t, 16}
198 * The translation block use count value to do native recompilation at.
199 * Set to zero to disable native recompilation. */
200 uint32_t uTbNativeRecompileAtUsedCount = 16;
201 rc = CFGMR3QueryU32Def(pIem, "NativeRecompileAtUsedCount", &uTbNativeRecompileAtUsedCount, 16);
202 AssertLogRelRCReturn(rc, rc);
203
204 /** @cfgm{/IEM/HostICacheInvalidationViaHostAPI, bool, false}
205 * Whether to use any available host OS API for flushing the instruction cache
206 * after completing a translation block. */
207 bool fFlag = false;
208 rc = CFGMR3QueryBoolDef(pIem, "HostICacheInvalidationViaHostAPI", &fFlag, false);
209 AssertLogRelRCReturn(rc, rc);
210 uint8_t fHostICacheInvalidation = fFlag ? IEMNATIVE_ICACHE_F_USE_HOST_API : 0;
211
212 /** @cfgm{/IEM/HostICacheInvalidationEndWithIsb, bool, false}
213 * Whether to include an ISB in the instruction cache invalidation sequence
214 * after completing a translation block. */
215 fFlag = false;
216 rc = CFGMR3QueryBoolDef(pIem, "HostICacheInvalidationEndWithIsb", &fFlag, false);
217 AssertLogRelRCReturn(rc, rc);
218 if (fFlag)
219 fHostICacheInvalidation |= IEMNATIVE_ICACHE_F_END_WITH_ISH;
220
221#endif /* VBOX_WITH_IEM_RECOMPILER */
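    /* For reference, the /IEM/* keys queried above live in the VM's CFGM tree; on a regular installation
       they can typically be overridden from the host via the documented "VBoxInternal/" extradata
       mechanism, e.g. (illustrative value):
           VBoxManage setextradata "<vm-name>" "VBoxInternal/IEM/NativeRecompileAtUsedCount" 0
       which, per the doc comment above, disables native recompilation. */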
222
223 /*
224 * Initialize per-CPU data and register statistics.
225 */
226#if 1
227 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
228 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
229#else
230 uint64_t const uInitialTlbRevision = UINT64_C(0) + (IEMTLB_REVISION_INCR * 4U);
231 uint64_t const uInitialTlbPhysRev = UINT64_C(0) + (IEMTLB_PHYS_REV_INCR * 4U);
232#endif
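    /* Note: the live (#if 1) values start the revision counters a couple of hundred increments below the
       64-bit wrap-around point, presumably so the revision rollover handling (see the RevisionRollovers
       statistics registered further down) gets exercised early rather than only after very long uptimes. */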
233
234 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
235 {
236 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
237 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
238
239 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
240#ifndef VBOX_VMM_TARGET_ARMV8
241 pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal = pVCpu->iem.s.DataTlb.uTlbRevisionGlobal = uInitialTlbRevision;
242#endif
243 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
244#ifndef VBOX_VMM_TARGET_ARMV8
245 pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
246 pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
247 pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
248 pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
249#endif
250
251#ifndef VBOX_VMM_TARGET_ARMV8
252 pVCpu->iem.s.cTbsTillNextTimerPoll = 128;
253 pVCpu->iem.s.cTbsTillNextTimerPollPrev = 128;
254#endif
255
256 /*
257 * Host and guest CPU information.
258 */
259 if (idCpu == 0)
260 {
261 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
262 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
263#if !defined(VBOX_VMM_TARGET_ARMV8)
264 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
265 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
266 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
267# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
268 if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
269 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
270 else
271# endif
272 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
273#else
274 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
275 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
276#endif
277
278#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
279 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
280 {
281 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
282 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
283 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
284 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
285 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
286 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
287 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
288 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
289 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
290 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
291 }
292 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
293 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
294 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
295#else
296 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
297 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
298 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
299#endif
300 }
301 else
302 {
303 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
304 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor;
305 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
306 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
307#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
308 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
309#endif
310 }
311
312 /*
313 * Mark all buffers free.
314 */
315 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
316 while (iMemMap-- > 0)
317 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
318
319#ifdef VBOX_WITH_IEM_RECOMPILER
320 /*
321 * Recompiler state and configuration distribution.
322 */
323 pVCpu->iem.s.uRegFpCtrl = IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED;
324 pVCpu->iem.s.uTbNativeRecompileAtUsedCount = uTbNativeRecompileAtUsedCount;
325 pVCpu->iem.s.fHostICacheInvalidation = fHostICacheInvalidation;
326#endif
327
328#ifdef IEM_WITH_TLB_TRACE
329 /*
330 * Allocate trace buffer.
331 */
332 pVCpu->iem.s.idxTlbTraceEntry = 0;
333 pVCpu->iem.s.cTlbTraceEntriesShift = 16;
334 pVCpu->iem.s.paTlbTraceEntries = (PIEMTLBTRACEENTRY)RTMemPageAlloc( RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift)
335 * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
336 AssertLogRelReturn(pVCpu->iem.s.paTlbTraceEntries, VERR_NO_PAGE_MEMORY);
337#endif
338 }
339
340
341#ifdef VBOX_WITH_IEM_RECOMPILER
342 /*
343 * Initialize the TB allocator and cache (/ hash table).
344 *
345 * This is done by each EMT to try to get more optimal thread/NUMA locality of
346 * the allocations.
347 */
348 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
349 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
350 AssertLogRelRCReturn(rc, rc);
351#endif
352
353 /*
354 * Register statistics.
355 */
356 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
357 {
358#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
359 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
360 char szPat[128];
361 RT_NOREF_PV(szPat); /* lazy bird */
362 char szVal[128];
363 RT_NOREF_PV(szVal); /* lazy bird */
364
365 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
366 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
367 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
368 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
369 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
370 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
371 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
372 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
373 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
374 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
375 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
376 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
377 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
378 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
379 STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
380 "Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
381 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
382 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
383 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
384 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
385
386 /* Code TLB: */
387 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
388 "Code TLB non-global revision", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobal", idCpu);
389 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
390 "Code TLB global revision", "/IEM/CPU%u/Tlb/Code/RevisionGlobal", idCpu);
391 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
392 "Code TLB non-global flushes", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobalFlushes", idCpu);
393 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
394 "Code TLB global flushes", "/IEM/CPU%u/Tlb/Code/RevisionGlobalFlushes", idCpu);
395 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
396 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/RevisionRollovers", idCpu);
397
398 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
399 "Code TLB physical revision", "/IEM/CPU%u/Tlb/Code/PhysicalRevision", idCpu);
400 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
401 "Code TLB revision flushes", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionFlushes", idCpu);
402 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
403 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionRollovers", idCpu);
404
405 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
406 "Code TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageGlobalCurLoads", idCpu);
407 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
408 "Code TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalFirstTag", idCpu);
409 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
410 "Code TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalLastTag", idCpu);
411
412 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
413 "Code TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalCurLoads", idCpu);
414 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
415 "Code TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalFirstTag", idCpu);
416 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
417 "Code TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalLastTag", idCpu);
418
419 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
420 "Code TLB page invalidation requests", "/IEM/CPU%u/Tlb/Code/InvlPg", idCpu);
421 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
422 "Code TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeGlobal", idCpu);
423 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
424 "Code TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeNonGlobal", idCpu);
425
426 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
427 "Code TLB misses", "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
428 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
429 "Code TLB global loads", "/IEM/CPU%u/Tlb/Code/Misses/GlobalLoads", idCpu);
430 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
431 "Code TLB slow read path", "/IEM/CPU%u/Tlb/Code/SlowReads", idCpu);
432# ifdef IEM_WITH_TLB_STATISTICS
433 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
434 "Code TLB hits (non-native)", "/IEM/CPU%u/Tlb/Code/Hits/Other", idCpu);
435# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
436 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
437 "Code TLB native hits on new page", "/IEM/CPU%u/Tlb/Code/Hits/New-Page", idCpu);
438 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
439 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/Tlb/Code/Hits/New-Page-With-Offset", idCpu);
440# endif
441
442 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits/*", idCpu);
443 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB hits",
444 "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
445
446 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Hits|/IEM/CPU%u/Tlb/Code/Misses", idCpu, idCpu);
447 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szVal, "Code TLB lookups (sum of hits and misses)",
448 "/IEM/CPU%u/Tlb/Code/AllLookups", idCpu);
449
450 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
451 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
452 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
453 "Code TLB actual miss rate", "/IEM/CPU%u/Tlb/Code/RateMisses", idCpu);
454
455# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
456 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
457 "Code TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
458 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Tag", idCpu);
459 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
460 "Code TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
461 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
462 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
463 "Code TLB misses in native code: Alignment [not directly included grand parent sum]",
464 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Alignment", idCpu);
465 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
466 "Code TLB misses in native code: Cross page [not directly included grand parent sum]",
467 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/CrossPage", idCpu);
468 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
469 "Code TLB misses in native code: Non-canonical [not directly included grand parent sum]",
470 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/NonCanonical", idCpu);
471
472 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
473 "Code TLB native misses on new page",
474 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page", idCpu);
475 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
476 "Code TLB native misses on new page w/ offset",
477 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page-With-Offset", idCpu);
478# endif
479# endif /* IEM_WITH_TLB_STATISTICS */
480
481 /* Data TLB organized as best we can... */
482 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
483 "Data TLB non-global revision", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobal", idCpu);
484 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
485 "Data TLB global revision", "/IEM/CPU%u/Tlb/Data/RevisionGlobal", idCpu);
486 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
487 "Data TLB non-global flushes", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobalFlushes", idCpu);
488 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
489 "Data TLB global flushes", "/IEM/CPU%u/Tlb/Data/RevisionGlobalFlushes", idCpu);
490 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
491 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/RevisionRollovers", idCpu);
492
493 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
494 "Data TLB physical revision", "/IEM/CPU%u/Tlb/Data/PhysicalRevision", idCpu);
495 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
496 "Data TLB revision flushes", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionFlushes", idCpu);
497 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
498 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionRollovers", idCpu);
499
500 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
501 "Data TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageGlobalCurLoads", idCpu);
502 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
503 "Data TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalFirstTag", idCpu);
504 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
505 "Data TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalLastTag", idCpu);
506
507 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
508 "Data TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalCurLoads", idCpu);
509 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
510 "Data TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalFirstTag", idCpu);
511 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
512 "Data TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalLastTag", idCpu);
513
514 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
515 "Data TLB page invalidation requests", "/IEM/CPU%u/Tlb/Data/InvlPg", idCpu);
516 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
517 "Data TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeGlobal", idCpu);
518 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
519 "Data TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeNonGlobal", idCpu);
520
521 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
522 "Data TLB core misses (iemMemMap, direct iemMemMapJmp (not safe path))",
523 "/IEM/CPU%u/Tlb/Data/Misses/Core", idCpu);
524 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
525 "Data TLB global loads",
526 "/IEM/CPU%u/Tlb/Data/Misses/Core/GlobalLoads", idCpu);
527 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
528 "Data TLB safe read path (inline/native misses going to iemMemMapJmp)",
529 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Reads", idCpu);
530 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
531 "Data TLB safe write path (inline/native misses going to iemMemMapJmp)",
532 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Writes", idCpu);
533 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/*", idCpu);
534 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB misses",
535 "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
536
537 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/Safe/*", idCpu);
538 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB actual safe path calls (read + write)",
539 "/IEM/CPU%u/Tlb/Data/Misses/Safe", idCpu);
540 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
541 "Data TLB hits in iemMemMapJmp - not part of safe-path total",
542 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartHits", idCpu);
543 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
544 "Data TLB misses in iemMemMapJmp - not part of safe-path total",
545 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses", idCpu);
546 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
547 "Data TLB global loads",
548 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses/GlobalLoads", idCpu);
549
550# ifdef IEM_WITH_TLB_STATISTICS
551# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
552 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
553 "Data TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
554 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Tag", idCpu);
555 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
556 "Data TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
557 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
558 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
559 "Data TLB misses in native code: Alignment [not directly included grand parent sum]",
560 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Alignment", idCpu);
561 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
562 "Data TLB misses in native code: Cross page [not directly included grand parent sum]",
563 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/CrossPage", idCpu);
564 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
565 "Data TLB misses in native code: Non-canonical [not directly included grand parent sum]",
566 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/NonCanonical", idCpu);
567# endif
568# endif
569
570# ifdef IEM_WITH_TLB_STATISTICS
571 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
572 "Data TLB core hits (iemMemMap, direct iemMemMapJmp (not safe path))",
573 "/IEM/CPU%u/Tlb/Data/Hits/Core", idCpu);
574 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInlineCodeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
575 "Data TLB hits in IEMAllMemRWTmplInline.cpp.h",
576 "/IEM/CPU%u/Tlb/Data/Hits/Inline", idCpu);
577# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
578 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
579 "Data TLB native stack access hits",
580 "/IEM/CPU%u/Tlb/Data/Hits/Native/Stack", idCpu);
581 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
582 "Data TLB native data fetch hits",
583 "/IEM/CPU%u/Tlb/Data/Hits/Native/Fetch", idCpu);
584 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
585 "Data TLB native data store hits",
586 "/IEM/CPU%u/Tlb/Data/Hits/Native/Store", idCpu);
587 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
588 "Data TLB native mapped data hits",
589 "/IEM/CPU%u/Tlb/Data/Hits/Native/Mapped", idCpu);
590# endif
591 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/*", idCpu);
592 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits",
593 "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
594
595# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
596 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/Native/*", idCpu);
597 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits from native code",
598 "/IEM/CPU%u/Tlb/Data/Hits/Native", idCpu);
599# endif
600
601 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Hits|/IEM/CPU%u/Tlb/Data/Misses", idCpu, idCpu);
602 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szVal, "Data TLB lookups (sum of hits and misses)",
603 "/IEM/CPU%u/Tlb/Data/AllLookups", idCpu);
604
605 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
606 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
607 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
608 "Data TLB actual miss rate", "/IEM/CPU%u/Tlb/Data/RateMisses", idCpu);
609
610# endif /* IEM_WITH_TLB_STATISTICS */
611
612
613#ifdef VBOX_WITH_IEM_RECOMPILER
614 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
615 "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
616 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
617 "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
618 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
619 "Times threaded TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecThreadedBreaks", idCpu);
620# ifdef VBOX_WITH_STATISTICS
621 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
622 "Times threaded TB execution was interrupted/broken off on a call with lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithLookup", idCpu);
623 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithoutLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
624 "Times threaded TB execution was interrupted/broken off on a call without lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithoutLookup", idCpu);
625# endif
626
627# ifdef VBOX_WITH_STATISTICS
628 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPoll, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
629 "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll", idCpu);
630 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollRun, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
631 "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll/Running", idCpu);
632 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollUnchanged, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
633 "Timer polling interval unchanged", "/IEM/CPU%u/re/TimerPoll/Unchanged", idCpu);
634 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollTiny, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
635 "Timer polling interval tiny", "/IEM/CPU%u/re/TimerPoll/Tiny", idCpu);
636 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollDefaultCalc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
637 "Timer polling interval calculated using defaults", "/IEM/CPU%u/re/TimerPoll/DefaultCalc", idCpu);
638 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollMax, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
639 "Timer polling interval maxed out", "/IEM/CPU%u/re/TimerPoll/Max", idCpu);
640 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorDivision, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE,
641 "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorDivision", idCpu);
642 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorMultiplication, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
643 "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorMultiplication", idCpu);
644# endif
645 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbsTillNextTimerPollPrev, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
646 "Timer polling interval (in TBs)", "/IEM/CPU%u/re/TimerPollInterval", idCpu);
647
648 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
649 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
650 "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
651 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
652 "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
653# ifdef VBOX_WITH_STATISTICS
654 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
655 "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
656# endif
657 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
658 "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/ExecMem/TbPruningNative", idCpu);
659 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
660 "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
661 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
662 "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
663 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
664 "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
665 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
666 "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
667 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
668 "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
669 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
670 "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
671 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
672 "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);
673
674 PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
675 STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
676 "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);
677
678 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
679 "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
680 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHitsViaTbLookupTable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
681 "Translation block lookup hits via TB lookup table associated with the previous TB", "/IEM/CPU%u/re/cTbLookupHitsViaTbLookupTable", idCpu);
682 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
683 "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
684 STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
685 "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
686# ifdef VBOX_WITH_STATISTICS
687 STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
688 "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
689# endif
690
691 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
692 "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
693 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
694 "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
695 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLookupEntries, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
696 "TB lookup table entries per threaded translation block", "/IEM/CPU%u/re/ThrdLookupEntriesPerTb", idCpu);
697
698 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
699 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
700 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckTimersBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
701 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckTimersBreaks", idCpu);
702 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
703 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
704 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
705 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
706 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
707 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
708
709 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
710 "Detected loop full TB", "/IEM/CPU%u/re/LoopFullTbDetected", idCpu);
711 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected2, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
712 "Detected loop full TB but looping back to before the first TB instruction",
713 "/IEM/CPU%u/re/LoopFullTbDetected2", idCpu);
714 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopInTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
715 "Detected loop within TB", "/IEM/CPU%u/re/LoopInTbDetected", idCpu);
716
717 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeExecMemInstrBufAllocFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
718 "Number of times the exec memory allocator failed to allocate a large enough buffer",
719 "/IEM/CPU%u/re/NativeExecMemInstrBufAllocFailed", idCpu);
720
721 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
722 "Number of threaded calls per TB that have been properly recompiled to native code",
723 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
724 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
725 "Number of threaded calls per TB that could not be recompiler to native code",
726 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
727 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
728 "Number of threaded calls that could not be recompiler to native code",
729 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
730
731 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
732 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
733 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
734 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
735
736# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
737# ifdef VBOX_WITH_STATISTICS
738 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
739 "Number of calls to iemNativeRegAllocFindFree.",
740 "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
741# endif
742 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
743 "Number of times iemNativeRegAllocFindFree needed to free a variable.",
744 "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
745# ifdef VBOX_WITH_STATISTICS
746 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
747 "Number of times iemNativeRegAllocFindFree did not needed to free any variables.",
748 "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
749 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
750 "Times liveness info freeed up shadowed guest registers in iemNativeRegAllocFindFree.",
751 "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
752 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
753 "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
754 "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
755
756# define REG_NATIVE_EFL_GROUP(a_Lower, a_Camel) do { \
757 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponed ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
758 "Postponed all status flag updating, " #a_Lower " instructions", \
759 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
760 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkipped ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
761 "Skipped all status flag updating, " #a_Lower " instructions", \
762 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
763 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflTotal ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
764 "Total number of " #a_Lower " intructions with status flag updating", \
765 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
766 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
767 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
768 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
769 "Postponed all status flag updating, " #a_Lower " instructions, percentage", \
770 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "PostponedPct", idCpu); \
771 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
772 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
773 "Skipped all status flag updating, " #a_Lower " instructions, percentage", \
774 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "SkippedPct", idCpu); \
775 } while (0)
776 REG_NATIVE_EFL_GROUP(arithmetic, Arithmetic);
777 REG_NATIVE_EFL_GROUP(logical, Logical);
778 REG_NATIVE_EFL_GROUP(shift, Shift);
779# undef REG_NATIVE_EFL_GROUP
780
781 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponedEmits, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
782 "Postponed EFLAGS calculation emits", "/IEM/CPU%u/re/NativeEFlags/ZZEmits", idCpu);
783
784 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
785 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippable", idCpu);
786 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippable", idCpu);
787 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippable", idCpu);
788 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippable", idCpu);
789 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippable", idCpu);
790
791 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfRequired", idCpu);
792 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfRequired", idCpu);
793 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfRequired", idCpu);
794 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfRequired", idCpu);
795 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfRequired", idCpu);
796 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfRequired", idCpu);
797
798# ifdef IEMLIVENESS_EXTENDED_LAYOUT
799 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfDelayable", idCpu);
800 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfDelayable", idCpu);
801 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfDelayable", idCpu);
802 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfDelayable", idCpu);
803 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfDelayable", idCpu);
804 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfDelayable", idCpu);
805# endif
806
807 /* Sum up all status bits ('_' is a sorting hack). */
808 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fSkippable*", idCpu);
809 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total skippable EFLAGS status bit updating",
810 "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);
811
812 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fRequired*", idCpu);
813 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total required EFLAGS status bit updating",
814 "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);
815
816# ifdef IEMLIVENESS_EXTENDED_LAYOUT
817 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fDelayable*", idCpu);
818 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total potentially delayable EFLAGS status bit updating",
819 "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
820# endif
821
822 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?f*", idCpu);
823 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total EFLAGS status bit events of any kind",
824 "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
825
826 /* Corresponding ratios / percentages of the totals. */
827 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
828 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);
829 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
830 "Total skippable EFLAGS status bit updating percentage",
831 "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippablePct", idCpu);
832
833 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
834 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);
835 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
836 "Total required EFLAGS status bit updating percentage",
837 "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequiredPct", idCpu);
838
839# ifdef IEMLIVENESS_EXTENDED_LAYOUT
840 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
841 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
842 "Total potentially delayable EFLAGS status bit updating percentage",
843 "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayablePct", idCpu);
844# endif
845
846 /* Ratios of individual bits. */
847 size_t const offFlagChar = RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/Cf*", idCpu) - 3;
848 Assert(szPat[offFlagChar] == 'C');
849 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
850 Assert(szVal[offFlagChar] == 'C');
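    /* offFlagChar indexes the flag letter ('C' in ".../Cf*" and ".../CfSkippable"), so the same two
       buffers can be reused for each EFLAGS bit below by patching just that one character. */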
851 szPat[offFlagChar] = szVal[offFlagChar] = 'C'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.CF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippablePct", idCpu);
852 szPat[offFlagChar] = szVal[offFlagChar] = 'P'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.PF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippablePct", idCpu);
853 szPat[offFlagChar] = szVal[offFlagChar] = 'A'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.AF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippablePct", idCpu);
854 szPat[offFlagChar] = szVal[offFlagChar] = 'Z'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.ZF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippablePct", idCpu);
855 szPat[offFlagChar] = szVal[offFlagChar] = 'S'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.SF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippablePct", idCpu);
856 szPat[offFlagChar] = szVal[offFlagChar] = 'O'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.OF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippablePct", idCpu);
857
858 /* PC updates total and skipped, with PCT ratio. */
859 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates", "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
860 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
861 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
862 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
863 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
864 "Delayed RIP updating percentage",
865 "/IEM/CPU%u/re/NativePcUpdateDelayed_StatusDelayedPct", idCpu);
866
867# endif /* VBOX_WITH_STATISTICS */
868# ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
869 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEndIfOtherBranchDirty, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
870 "IEM_MC_ENDIF flushing dirty shadow registers for other branch (not good).",
871 "/IEM/CPU%u/re/NativeEndIfOtherBranchDirty", idCpu);
872# endif
873# ifdef VBOX_WITH_STATISTICS
874 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
875 "Number of calls to iemNativeSimdRegAllocFindFree.",
876 "/IEM/CPU%u/re/NativeSimdRegFindFree", idCpu);
877 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
878 "Number of times iemNativeSimdRegAllocFindFree needed to free a variable.",
879 "/IEM/CPU%u/re/NativeSimdRegFindFreeVar", idCpu);
880 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
881 "Number of times iemNativeSimdRegAllocFindFree did not need to free any variables.",
882 "/IEM/CPU%u/re/NativeSimdRegFindFreeNoVar", idCpu);
883 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
884 "Times liveness info freed up shadowed guest registers in iemNativeSimdRegAllocFindFree.",
885 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessUnshadowed", idCpu);
886 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
887 "Times liveness info helped finding the return register in iemNativeSimdRegAllocFindFree.",
888 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessHelped", idCpu);
889
890 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks",
891 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckPotential", idCpu);
892 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks",
893 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckPotential", idCpu);
894 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks",
895 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckPotential", idCpu);
896 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks",
897 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckPotential", idCpu);
898
899 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted",
900 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckOmitted", idCpu);
901 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted",
902 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckOmitted", idCpu);
903 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted",
904 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckOmitted", idCpu);
905 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted",
906 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckOmitted", idCpu);
907
908 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbFinished, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
909 "Number of times the TB finishes execution completely",
910 "/IEM/CPU%u/re/NativeTbFinished", idCpu);
911# endif /* VBOX_WITH_STATISTICS */
912 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreak, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
913 "Number of times the TB finished through the ReturnBreak label",
914 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak", idCpu);
915 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreakFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
916 "Number of times the TB finished through the ReturnBreakFF label",
917 "/IEM/CPU%u/re/NativeTbExit/ReturnBreakFF", idCpu);
918 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnWithFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
919 "Number of times the TB finished through the ReturnWithFlags label",
920 "/IEM/CPU%u/re/NativeTbExit/ReturnWithFlags", idCpu);
921 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnOtherStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
922 "Number of times the TB finished with some other status value",
923 "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus", idCpu);
924 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLongJump, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
925 "Number of times the TB finished via long jump / throw",
926 "/IEM/CPU%u/re/NativeTbExit/LongJumps", idCpu);
927 /* These end up returning VINF_IEM_REEXEC_BREAK and are thus already counted under NativeTbExit/ReturnBreak: */
928 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitObsoleteTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
929 "Number of times the TB finished through the ObsoleteTb label",
930 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/ObsoleteTb", idCpu);
931 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
932 "Number of times the TB finished through the NeedCsLimChecking label",
933 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/NeedCsLimChecking", idCpu);
934 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
935 "Number of times the TB finished through the CheckBranchMiss label",
936 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/CheckBranchMiss", idCpu);
937 /* Raising stuff will either increment NativeTbExit/LongJumps or NativeTbExit/ReturnOtherStatus
938 depending on whether VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is defined: */
939# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
940# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus/"
941# else
942# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/LongJumps/"
943# endif
944 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseDe, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
945 "Number of times the TB finished raising a #DE exception",
946 RAISE_PREFIX "RaiseDe", idCpu);
947 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
948 "Number of times the TB finished raising a #UD exception",
949 RAISE_PREFIX "RaiseUd", idCpu);
950 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
951 "Number of times the TB finished raising an SSE related exception",
952 RAISE_PREFIX "RaiseSseRelated", idCpu);
953 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseAvxRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
954 "Number of times the TB finished raising an AVX related exception",
955 RAISE_PREFIX "RaiseAvxRelated", idCpu);
956 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseAvxFpRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
957 "Number of times the TB finished raising an SSE/AVX floating point related exception",
958 RAISE_PREFIX "RaiseSseAvxFpRelated", idCpu);
959 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseNm, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
960 "Number of times the TB finished raising a #NM exception",
961 RAISE_PREFIX "RaiseNm", idCpu);
962 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseGp0, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
963 "Number of times the TB finished raising a #GP(0) exception",
964 RAISE_PREFIX "RaiseGp0", idCpu);
965 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseMf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
966 "Number of times the TB finished raising a #MF exception",
967 RAISE_PREFIX "RaiseMf", idCpu);
968 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseXf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
969 "Number of times the TB finished raising a #XF exception",
970 RAISE_PREFIX "RaiseXf", idCpu);
971
972# ifdef VBOX_WITH_STATISTICS
973 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLoopFullTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
974 "Number of full TB loops.",
975 "/IEM/CPU%u/re/NativeTbExit/LoopFullTb", idCpu);
976# endif
977
978 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
979 "Direct linking #1 with IRQ check succeeded",
980 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1Irq", idCpu);
981 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
982 "Direct linking #1 w/o IRQ check succeeded",
983 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1NoIrq", idCpu);
984# ifdef VBOX_WITH_STATISTICS
985 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
986 "Direct linking #1 failed: No TB in lookup table",
987 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1NoTb", idCpu);
988 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
989 "Direct linking #1 failed: GCPhysPc mismatch",
990 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchGCPhysPc", idCpu);
991 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
992 "Direct linking #1 failed: TB flags mismatch",
993 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchFlags", idCpu);
994 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
995 "Direct linking #1 failed: IRQ or FF pending",
996 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1PendingIrq", idCpu);
997# endif
998
999 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1000 "Direct linking #2 with IRQ check succeeded",
1001 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2Irq", idCpu);
1002 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1003 "Direct linking #2 w/o IRQ check succeeded",
1004 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2NoIrq", idCpu);
1005# ifdef VBOX_WITH_STATISTICS
1006 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1007 "Direct linking #2 failed: No TB in lookup table",
1008 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2NoTb", idCpu);
1009 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1010 "Direct linking #2 failed: GCPhysPc mismatch",
1011 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchGCPhysPc", idCpu);
1012 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1013 "Direct linking #2 failed: TB flags mismatch",
1014 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchFlags", idCpu);
1015 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1016 "Direct linking #2 failed: IRQ or FF pending",
1017 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2PendingIrq", idCpu);
1018# endif
1019
1020 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeTbExit/*", idCpu); /* only immediate children, no sub folders */
1021 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
1022 "Number of times native TB execution finished before the end (not counting thrown memory++ exceptions)",
1023 "/IEM/CPU%u/re/NativeTbExit", idCpu);
1024
1025
1026# endif /* VBOX_WITH_IEM_NATIVE_RECOMPILER */
1027
1028
1029# ifdef VBOX_WITH_STATISTICS
1030 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1031 "iemMemMapJmp calls", "/IEM/CPU%u/iemMemMapJmp", idCpu);
1032 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapNoJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1033 "iemMemMap calls", "/IEM/CPU%u/iemMemMapNoJmp", idCpu);
1034 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferCrossPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1035 "iemMemBounceBufferMapCrossPage calls", "/IEM/CPU%u/iemMemMapBounceBufferCrossPage", idCpu);
1036 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferMapPhys, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1037 "iemMemBounceBufferMapPhys calls", "/IEM/CPU%u/iemMemMapBounceBufferMapPhys", idCpu);
1038# endif
1039
1040
1041#endif /* VBOX_WITH_IEM_RECOMPILER */
1042
1043 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
1044 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1045 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
1046 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
1047 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1048 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
1049
1050# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
1051 /* Instruction statistics: */
1052# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
1053 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1054 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
1055 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1056 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
1057# include "IEMInstructionStatisticsTmpl.h"
1058# undef IEM_DO_INSTR_STAT
1059# endif
1060
1061# if defined(VBOX_WITH_STATISTICS) && defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1062 /* Threaded function statistics: */
1063 for (unsigned i = 1; i < (unsigned)kIemThreadedFunc_End; i++)
1064 STAMR3RegisterF(pVM, &pVCpu->iem.s.acThreadedFuncStats[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED,
1065 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/ThrdFuncs/%s", idCpu, g_apszIemThreadedFunctionStats[i]);
1066# endif
1067
1068
1069 for (unsigned i = 1; i < RT_ELEMENTS(pVCpu->iem.s.aStatAdHoc); i++)
1070 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatAdHoc[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
1071 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/AdHoc/%02u", idCpu, i);
1072
1073#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
1074 }
1075
1076#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1077 /*
1078 * Register the per-VM VMX APIC-access page handler type.
1079 */
1080 if (pVM->cpum.ro.GuestFeatures.fVmx)
1081 {
1082 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
1083 iemVmxApicAccessPageHandler,
1084 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
1085 AssertLogRelRCReturn(rc, rc);
1086 }
1087#endif
1088
1089 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1090 DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM data TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1091#ifdef IEM_WITH_TLB_TRACE
1092 DBGFR3InfoRegisterInternalArgv(pVM, "tlbtrace", "IEM TLB trace log", iemR3InfoTlbTrace, DBGFINFO_FLAGS_RUN_ON_EMT);
1093#endif
1094#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1095 DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
1096 DBGFR3InfoRegisterInternalArgv(pVM, "tbtop", "IEM translation blocks most used or most recently used",
1097 iemR3InfoTbTop, DBGFINFO_FLAGS_RUN_ON_EMT);
1098#endif
1099#ifdef VBOX_WITH_DEBUGGER
1100 iemR3RegisterDebuggerCommands();
1101#endif
1102
1103 return VINF_SUCCESS;
1104}
1105
1106
1107VMMR3DECL(int) IEMR3Term(PVM pVM)
1108{
1109 NOREF(pVM);
1110#ifdef IEM_WITH_TLB_TRACE
1111 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1112 {
1113 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
1114 RTMemPageFree(pVCpu->iem.s.paTlbTraceEntries,
1115 RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift) * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
1116 }
1117#endif
1118#if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER) && defined(VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING)
1119 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1120 iemThreadedSaveTbForProfilingCleanup(pVM->apCpusR3[idCpu]);
1121#endif
1122 return VINF_SUCCESS;
1123}
1124
1125
1126VMMR3DECL(void) IEMR3Relocate(PVM pVM)
1127{
1128 RT_NOREF(pVM);
1129}
1130
1131
1132/**
1133 * Gets the name of a generic IEM exit code.
1134 *
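 * For example, uExit 0 returns "ExtInt 00h" and uExit 0x100 returns "SoftInt 00h";
 * values of 0x200 or higher return NULL.
 *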
1135 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
1136 * @param uExit The IEM exit to name.
1137 */
1138VMMR3DECL(const char *) IEMR3GetExitName(uint32_t uExit)
1139{
1140 static const char * const s_apszNames[] =
1141 {
1142 /* external interrupts */
1143 "ExtInt 00h", "ExtInt 01h", "ExtInt 02h", "ExtInt 03h", "ExtInt 04h", "ExtInt 05h", "ExtInt 06h", "ExtInt 07h",
1144 "ExtInt 08h", "ExtInt 09h", "ExtInt 0ah", "ExtInt 0bh", "ExtInt 0ch", "ExtInt 0dh", "ExtInt 0eh", "ExtInt 0fh",
1145 "ExtInt 10h", "ExtInt 11h", "ExtInt 12h", "ExtInt 13h", "ExtInt 14h", "ExtInt 15h", "ExtInt 16h", "ExtInt 17h",
1146 "ExtInt 18h", "ExtInt 19h", "ExtInt 1ah", "ExtInt 1bh", "ExtInt 1ch", "ExtInt 1dh", "ExtInt 1eh", "ExtInt 1fh",
1147 "ExtInt 20h", "ExtInt 21h", "ExtInt 22h", "ExtInt 23h", "ExtInt 24h", "ExtInt 25h", "ExtInt 26h", "ExtInt 27h",
1148 "ExtInt 28h", "ExtInt 29h", "ExtInt 2ah", "ExtInt 2bh", "ExtInt 2ch", "ExtInt 2dh", "ExtInt 2eh", "ExtInt 2fh",
1149 "ExtInt 30h", "ExtInt 31h", "ExtInt 32h", "ExtInt 33h", "ExtInt 34h", "ExtInt 35h", "ExtInt 36h", "ExtInt 37h",
1150 "ExtInt 38h", "ExtInt 39h", "ExtInt 3ah", "ExtInt 3bh", "ExtInt 3ch", "ExtInt 3dh", "ExtInt 3eh", "ExtInt 3fh",
1151 "ExtInt 40h", "ExtInt 41h", "ExtInt 42h", "ExtInt 43h", "ExtInt 44h", "ExtInt 45h", "ExtInt 46h", "ExtInt 47h",
1152 "ExtInt 48h", "ExtInt 49h", "ExtInt 4ah", "ExtInt 4bh", "ExtInt 4ch", "ExtInt 4dh", "ExtInt 4eh", "ExtInt 4fh",
1153 "ExtInt 50h", "ExtInt 51h", "ExtInt 52h", "ExtInt 53h", "ExtInt 54h", "ExtInt 55h", "ExtInt 56h", "ExtInt 57h",
1154 "ExtInt 58h", "ExtInt 59h", "ExtInt 5ah", "ExtInt 5bh", "ExtInt 5ch", "ExtInt 5dh", "ExtInt 5eh", "ExtInt 5fh",
1155 "ExtInt 60h", "ExtInt 61h", "ExtInt 62h", "ExtInt 63h", "ExtInt 64h", "ExtInt 65h", "ExtInt 66h", "ExtInt 67h",
1156 "ExtInt 68h", "ExtInt 69h", "ExtInt 6ah", "ExtInt 6bh", "ExtInt 6ch", "ExtInt 6dh", "ExtInt 6eh", "ExtInt 6fh",
1157 "ExtInt 70h", "ExtInt 71h", "ExtInt 72h", "ExtInt 73h", "ExtInt 74h", "ExtInt 75h", "ExtInt 76h", "ExtInt 77h",
1158 "ExtInt 78h", "ExtInt 79h", "ExtInt 7ah", "ExtInt 7bh", "ExtInt 7ch", "ExtInt 7dh", "ExtInt 7eh", "ExtInt 7fh",
1159 "ExtInt 80h", "ExtInt 81h", "ExtInt 82h", "ExtInt 83h", "ExtInt 84h", "ExtInt 85h", "ExtInt 86h", "ExtInt 87h",
1160 "ExtInt 88h", "ExtInt 89h", "ExtInt 8ah", "ExtInt 8bh", "ExtInt 8ch", "ExtInt 8dh", "ExtInt 8eh", "ExtInt 8fh",
1161 "ExtInt 90h", "ExtInt 91h", "ExtInt 92h", "ExtInt 93h", "ExtInt 94h", "ExtInt 95h", "ExtInt 96h", "ExtInt 97h",
1162 "ExtInt 98h", "ExtInt 99h", "ExtInt 9ah", "ExtInt 9bh", "ExtInt 9ch", "ExtInt 9dh", "ExtInt 9eh", "ExtInt 9fh",
1163 "ExtInt a0h", "ExtInt a1h", "ExtInt a2h", "ExtInt a3h", "ExtInt a4h", "ExtInt a5h", "ExtInt a6h", "ExtInt a7h",
1164 "ExtInt a8h", "ExtInt a9h", "ExtInt aah", "ExtInt abh", "ExtInt ach", "ExtInt adh", "ExtInt aeh", "ExtInt afh",
1165 "ExtInt b0h", "ExtInt b1h", "ExtInt b2h", "ExtInt b3h", "ExtInt b4h", "ExtInt b5h", "ExtInt b6h", "ExtInt b7h",
1166 "ExtInt b8h", "ExtInt b9h", "ExtInt bah", "ExtInt bbh", "ExtInt bch", "ExtInt bdh", "ExtInt beh", "ExtInt bfh",
1167 "ExtInt c0h", "ExtInt c1h", "ExtInt c2h", "ExtInt c3h", "ExtInt c4h", "ExtInt c5h", "ExtInt c6h", "ExtInt c7h",
1168 "ExtInt c8h", "ExtInt c9h", "ExtInt cah", "ExtInt cbh", "ExtInt cch", "ExtInt cdh", "ExtInt ceh", "ExtInt cfh",
1169 "ExtInt d0h", "ExtInt d1h", "ExtInt d2h", "ExtInt d3h", "ExtInt d4h", "ExtInt d5h", "ExtInt d6h", "ExtInt d7h",
1170 "ExtInt d8h", "ExtInt d9h", "ExtInt dah", "ExtInt dbh", "ExtInt dch", "ExtInt ddh", "ExtInt deh", "ExtInt dfh",
1171 "ExtInt e0h", "ExtInt e1h", "ExtInt e2h", "ExtInt e3h", "ExtInt e4h", "ExtInt e5h", "ExtInt e6h", "ExtInt e7h",
1172 "ExtInt e8h", "ExtInt e9h", "ExtInt eah", "ExtInt ebh", "ExtInt ech", "ExtInt edh", "ExtInt eeh", "ExtInt efh",
1173 "ExtInt f0h", "ExtInt f1h", "ExtInt f2h", "ExtInt f3h", "ExtInt f4h", "ExtInt f5h", "ExtInt f6h", "ExtInt f7h",
1174 "ExtInt f8h", "ExtInt f9h", "ExtInt fah", "ExtInt fbh", "ExtInt fch", "ExtInt fdh", "ExtInt feh", "ExtInt ffh",
1175 /* software interrupts */
1176 "SoftInt 00h", "SoftInt 01h", "SoftInt 02h", "SoftInt 03h", "SoftInt 04h", "SoftInt 05h", "SoftInt 06h", "SoftInt 07h",
1177 "SoftInt 08h", "SoftInt 09h", "SoftInt 0ah", "SoftInt 0bh", "SoftInt 0ch", "SoftInt 0dh", "SoftInt 0eh", "SoftInt 0fh",
1178 "SoftInt 10h", "SoftInt 11h", "SoftInt 12h", "SoftInt 13h", "SoftInt 14h", "SoftInt 15h", "SoftInt 16h", "SoftInt 17h",
1179 "SoftInt 18h", "SoftInt 19h", "SoftInt 1ah", "SoftInt 1bh", "SoftInt 1ch", "SoftInt 1dh", "SoftInt 1eh", "SoftInt 1fh",
1180 "SoftInt 20h", "SoftInt 21h", "SoftInt 22h", "SoftInt 23h", "SoftInt 24h", "SoftInt 25h", "SoftInt 26h", "SoftInt 27h",
1181 "SoftInt 28h", "SoftInt 29h", "SoftInt 2ah", "SoftInt 2bh", "SoftInt 2ch", "SoftInt 2dh", "SoftInt 2eh", "SoftInt 2fh",
1182 "SoftInt 30h", "SoftInt 31h", "SoftInt 32h", "SoftInt 33h", "SoftInt 34h", "SoftInt 35h", "SoftInt 36h", "SoftInt 37h",
1183 "SoftInt 38h", "SoftInt 39h", "SoftInt 3ah", "SoftInt 3bh", "SoftInt 3ch", "SoftInt 3dh", "SoftInt 3eh", "SoftInt 3fh",
1184 "SoftInt 40h", "SoftInt 41h", "SoftInt 42h", "SoftInt 43h", "SoftInt 44h", "SoftInt 45h", "SoftInt 46h", "SoftInt 47h",
1185 "SoftInt 48h", "SoftInt 49h", "SoftInt 4ah", "SoftInt 4bh", "SoftInt 4ch", "SoftInt 4dh", "SoftInt 4eh", "SoftInt 4fh",
1186 "SoftInt 50h", "SoftInt 51h", "SoftInt 52h", "SoftInt 53h", "SoftInt 54h", "SoftInt 55h", "SoftInt 56h", "SoftInt 57h",
1187 "SoftInt 58h", "SoftInt 59h", "SoftInt 5ah", "SoftInt 5bh", "SoftInt 5ch", "SoftInt 5dh", "SoftInt 5eh", "SoftInt 5fh",
1188 "SoftInt 60h", "SoftInt 61h", "SoftInt 62h", "SoftInt 63h", "SoftInt 64h", "SoftInt 65h", "SoftInt 66h", "SoftInt 67h",
1189 "SoftInt 68h", "SoftInt 69h", "SoftInt 6ah", "SoftInt 6bh", "SoftInt 6ch", "SoftInt 6dh", "SoftInt 6eh", "SoftInt 6fh",
1190 "SoftInt 70h", "SoftInt 71h", "SoftInt 72h", "SoftInt 73h", "SoftInt 74h", "SoftInt 75h", "SoftInt 76h", "SoftInt 77h",
1191 "SoftInt 78h", "SoftInt 79h", "SoftInt 7ah", "SoftInt 7bh", "SoftInt 7ch", "SoftInt 7dh", "SoftInt 7eh", "SoftInt 7fh",
1192 "SoftInt 80h", "SoftInt 81h", "SoftInt 82h", "SoftInt 83h", "SoftInt 84h", "SoftInt 85h", "SoftInt 86h", "SoftInt 87h",
1193 "SoftInt 88h", "SoftInt 89h", "SoftInt 8ah", "SoftInt 8bh", "SoftInt 8ch", "SoftInt 8dh", "SoftInt 8eh", "SoftInt 8fh",
1194 "SoftInt 90h", "SoftInt 91h", "SoftInt 92h", "SoftInt 93h", "SoftInt 94h", "SoftInt 95h", "SoftInt 96h", "SoftInt 97h",
1195 "SoftInt 98h", "SoftInt 99h", "SoftInt 9ah", "SoftInt 9bh", "SoftInt 9ch", "SoftInt 9dh", "SoftInt 9eh", "SoftInt 9fh",
1196 "SoftInt a0h", "SoftInt a1h", "SoftInt a2h", "SoftInt a3h", "SoftInt a4h", "SoftInt a5h", "SoftInt a6h", "SoftInt a7h",
1197 "SoftInt a8h", "SoftInt a9h", "SoftInt aah", "SoftInt abh", "SoftInt ach", "SoftInt adh", "SoftInt aeh", "SoftInt afh",
1198 "SoftInt b0h", "SoftInt b1h", "SoftInt b2h", "SoftInt b3h", "SoftInt b4h", "SoftInt b5h", "SoftInt b6h", "SoftInt b7h",
1199 "SoftInt b8h", "SoftInt b9h", "SoftInt bah", "SoftInt bbh", "SoftInt bch", "SoftInt bdh", "SoftInt beh", "SoftInt bfh",
1200 "SoftInt c0h", "SoftInt c1h", "SoftInt c2h", "SoftInt c3h", "SoftInt c4h", "SoftInt c5h", "SoftInt c6h", "SoftInt c7h",
1201 "SoftInt c8h", "SoftInt c9h", "SoftInt cah", "SoftInt cbh", "SoftInt cch", "SoftInt cdh", "SoftInt ceh", "SoftInt cfh",
1202 "SoftInt d0h", "SoftInt d1h", "SoftInt d2h", "SoftInt d3h", "SoftInt d4h", "SoftInt d5h", "SoftInt d6h", "SoftInt d7h",
1203 "SoftInt d8h", "SoftInt d9h", "SoftInt dah", "SoftInt dbh", "SoftInt dch", "SoftInt ddh", "SoftInt deh", "SoftInt dfh",
1204 "SoftInt e0h", "SoftInt e1h", "SoftInt e2h", "SoftInt e3h", "SoftInt e4h", "SoftInt e5h", "SoftInt e6h", "SoftInt e7h",
1205 "SoftInt e8h", "SoftInt e9h", "SoftInt eah", "SoftInt ebh", "SoftInt ech", "SoftInt edh", "SoftInt eeh", "SoftInt efh",
1206 "SoftInt f0h", "SoftInt f1h", "SoftInt f2h", "SoftInt f3h", "SoftInt f4h", "SoftInt f5h", "SoftInt f6h", "SoftInt f7h",
1207 "SoftInt f8h", "SoftInt f9h", "SoftInt fah", "SoftInt fbh", "SoftInt fch", "SoftInt fdh", "SoftInt feh", "SoftInt ffh",
1208 };
1209 if (uExit < RT_ELEMENTS(s_apszNames))
1210 return s_apszNames[uExit];
1211 return NULL;
1212}
1213
1214
1215/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1216static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
1217{
1218 if (*pfHeader)
1219 return;
1220 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
1221 *pfHeader = true;
1222}
1223
1224
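/* Flags for iemR3InfoTlbPrintSlot & iemR3InfoTlbPrintSlots: only list entries whose tag
   matches the current TLB revision, and/or cross-check entries against the guest page
   tables (the check is compiled out on ARMv8). */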
1225#define IEMR3INFOTLB_F_ONLY_VALID RT_BIT_32(0)
1226#define IEMR3INFOTLB_F_CHECK RT_BIT_32(1)
1227
1228/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1229static void iemR3InfoTlbPrintSlot(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe,
1230 uint32_t uSlot, uint32_t fFlags)
1231{
1232#ifndef VBOX_VMM_TARGET_ARMV8
1233 uint64_t const uTlbRevision = !(uSlot & 1) ? pTlb->uTlbRevision : pTlb->uTlbRevisionGlobal;
1234#else
1235 uint64_t const uTlbRevision = pTlb->uTlbRevision;
1236#endif
1237 if ((fFlags & IEMR3INFOTLB_F_ONLY_VALID) && (pTlbe->uTag & IEMTLB_REVISION_MASK) != uTlbRevision)
1238 return;
1239
1240 /* The address needs to be sign extended, thus the shifting fun here. */
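    /* The left shift places the tag's top address bit in bit 63; the arithmetic right shift
       then sign-extends it while putting the page number back at GUEST_PAGE_SHIFT. */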
1241 RTGCPTR const GCPtr = (RTGCINTPTR)((pTlbe->uTag & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1242 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT);
1243 const char *pszValid = "";
1244#ifndef VBOX_VMM_TARGET_ARMV8
1245 char szTmp[128];
1246 if (fFlags & IEMR3INFOTLB_F_CHECK)
1247 {
1248 uint32_t const fInvSlotG = (uint32_t)!(uSlot & 1) << X86_PTE_BIT_G;
1249 PGMPTWALKFAST WalkFast;
1250 int rc = PGMGstQueryPageFast(pVCpu, GCPtr, 0 /*fFlags - don't check or modify anything */, &WalkFast);
1251 pszValid = szTmp;
1252 if (RT_FAILURE(rc))
1253 switch (rc)
1254 {
1255 case VERR_PAGE_TABLE_NOT_PRESENT:
1256 switch ((WalkFast.fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT)
1257 {
1258 case 1: pszValid = " stale(page-not-present)"; break;
1259 case 2: pszValid = " stale(pd-entry-not-present)"; break;
1260 case 3: pszValid = " stale(pdptr-entry-not-present)"; break;
1261 case 4: pszValid = " stale(pml4-entry-not-present)"; break;
1262 case 5: pszValid = " stale(pml5-entry-not-present)"; break;
1263 default: pszValid = " stale(VERR_PAGE_TABLE_NOT_PRESENT)"; break;
1264 }
1265 break;
1266 default: RTStrPrintf(szTmp, sizeof(szTmp), " stale(rc=%d)", rc); break;
1267 }
1268 else if (WalkFast.GCPhys != pTlbe->GCPhys)
1269 RTStrPrintf(szTmp, sizeof(szTmp), " stale(GCPhys=%RGp)", WalkFast.GCPhys);
1270 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G | X86_PTE_A | X86_PTE_D))
1271 == ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1272 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED))
1273 | fInvSlotG ) )
1274 pszValid = " still-valid";
1275 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G))
1276 == ((pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER)) | fInvSlotG) )
1277 switch ( (~WalkFast.fEffective & (X86_PTE_A | X86_PTE_D))
1278 ^ (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED)) )
1279 {
1280 case X86_PTE_A:
1281 pszValid = WalkFast.fEffective & X86_PTE_A ? " still-valid(accessed-now)" : " still-valid(accessed-no-more)";
1282 break;
1283 case X86_PTE_D:
1284 pszValid = WalkFast.fEffective & X86_PTE_D ? " still-valid(dirty-now)" : " still-valid(dirty-no-more)";
1285 break;
1286 case X86_PTE_D | X86_PTE_A:
1287 RTStrPrintf(szTmp, sizeof(szTmp), " still-valid(%s%s)",
1288 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1289 : WalkFast.fEffective & X86_PTE_D ? "dirty-now" : "dirty-no-more",
1290 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1291 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1292 break;
1293 default: AssertFailed(); break;
1294 }
1295 else
1296 RTStrPrintf(szTmp, sizeof(szTmp), " stale(%s%s%s%s%s)",
1297 (~WalkFast.fEffective & X86_PTE_RW) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) ? ""
1298 : WalkFast.fEffective & X86_PTE_RW ? "writable-now" : "writable-no-more",
1299 (~WalkFast.fEffective & X86_PTE_US) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) ? ""
1300 : WalkFast.fEffective & X86_PTE_US ? " user-now" : " user-no-more",
1301 (~WalkFast.fEffective & X86_PTE_G) == fInvSlotG ? ""
1302 : WalkFast.fEffective & X86_PTE_G ? " global-now" : " global-no-more",
1303 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1304 : WalkFast.fEffective & X86_PTE_D ? " dirty-now" : " dirty-no-more",
1305 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1306 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1307 }
1308#else
1309 RT_NOREF(pVCpu);
1310#endif
1311
1312 pHlp->pfnPrintf(pHlp, IEMTLB_SLOT_FMT ": %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s%s%s%s/%s%s%s%s/%s %s%s\n",
1313 uSlot,
1314 (pTlbe->uTag & IEMTLB_REVISION_MASK) == uTlbRevision ? "valid "
1315 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
1316 : "expired",
1317 GCPtr, /* -> */
1318 pTlbe->GCPhys, /* / */ pTlbe->pbMappingR3,
1319 /* / */
1320 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
1321 /* */
1322 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "R-" : "RW",
1323 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "-" : "X",
1324 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
1325 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
1326 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER ? "U" : "S",
1327 !(uSlot & 1) ? "-" : "G",
1328 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE ? "2M" : "4K",
1329 /* / */
1330 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
1331 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
1332 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "u" : "-",
1333 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_CODE_PAGE ? "c" : "-",
1334 /* / */
1335 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "N" : "M",
1336 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
1337 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired",
1338 pszValid);
1339}
1340
1341
1342/** Displays one or more TLB slots. */
1343static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1344 uint32_t uSlot, uint32_t cSlots, uint32_t fFlags, bool *pfHeader)
1345{
1346 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
1347 {
1348 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
1349 {
1350 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
1351 cSlots, RT_ELEMENTS(pTlb->aEntries));
1352 cSlots = RT_ELEMENTS(pTlb->aEntries);
1353 }
1354
1355 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1356 while (cSlots-- > 0)
1357 {
1358 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
1359 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &Tlbe, uSlot, fFlags);
1360 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
1361 }
1362 }
1363 else
1364 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
1365 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
1366}
1367
1368
1369/** Displays the TLB slot for the given address. */
1370static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1371 uint64_t uAddress, uint32_t fFlags, bool *pfHeader)
1372{
1373 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1374
1375 uint64_t const uTag = IEMTLB_CALC_TAG_NO_REV(uAddress);
1376#ifdef IEMTLB_TAG_TO_EVEN_INDEX
1377 uint32_t const uSlot = IEMTLB_TAG_TO_EVEN_INDEX(uTag);
1378#else
1379 uint32_t const uSlot = IEMTLB_TAG_TO_INDEX(uTag);
1380#endif
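    /* On x86 each tag maps to an even/odd slot pair: the even slot is matched against
       uTlbRevision (non-global entries) and the odd slot against uTlbRevisionGlobal. */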
1381 IEMTLBENTRY const TlbeL = pTlb->aEntries[uSlot];
1382#ifndef VBOX_VMM_TARGET_ARMV8
1383 IEMTLBENTRY const TlbeG = pTlb->aEntries[uSlot + 1];
1384#endif
1385 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
1386 TlbeL.uTag == (uTag | pTlb->uTlbRevision) ? "match"
1387 : (TlbeL.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1388 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeL, uSlot, fFlags);
1389
1390#ifndef VBOX_VMM_TARGET_ARMV8
1391 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot + 1,
1392 TlbeG.uTag == (uTag | pTlb->uTlbRevisionGlobal) ? "match"
1393 : (TlbeG.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1394 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeG, uSlot + 1, fFlags);
1395#endif
1396}
1397
1398
1399/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
1400static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
1401{
1402 /*
1403 * This is entirely argument driven.
1404 */
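    /* Typical invocations (illustrative): 'info dtlb', 'info itlb --cpu=1 --check --all',
       'info dtlb -s 0x1a', or 'info dtlb 0xffffc90000001000' for a specific address. */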
1405 static RTGETOPTDEF const s_aOptions[] =
1406 {
1407 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1408 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1409 { "--check", 'C', RTGETOPT_REQ_NOTHING },
1410 { "all", 'A', RTGETOPT_REQ_NOTHING },
1411 { "--all", 'A', RTGETOPT_REQ_NOTHING },
1412 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1413 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
1414 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1415 { "--only-valid", 'v', RTGETOPT_REQ_NOTHING },
1416 };
1417
1418 RTGETOPTSTATE State;
1419 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1420 AssertRCReturnVoid(rc);
1421
1422 uint32_t cActionArgs = 0;
1423 bool fNeedHeader = true;
1424 bool fAddressMode = true;
1425 uint32_t fFlags = 0;
1426 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1427 PVMCPU pVCpu = pVCpuCall;
1428 if (!pVCpu)
1429 pVCpu = VMMGetCpuById(pVM, 0);
1430
1431 RTGETOPTUNION ValueUnion;
1432 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1433 {
1434 switch (rc)
1435 {
1436 case 'c':
1437 if (ValueUnion.u32 >= pVM->cCpus)
1438 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1439 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1440 {
1441 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1442 fNeedHeader = true;
1443 if (!pVCpuCall || pVCpuCall->idCpu != ValueUnion.u32)
1444 {
1445 pHlp->pfnPrintf(pHlp, "info: Can't check guest PTs when switching to a different VCpu! Targeting %u, on %u.\n",
1446 ValueUnion.u32, pVCpuCall->idCpu);
1447 fFlags &= ~IEMR3INFOTLB_F_CHECK;
1448 }
1449 }
1450 break;
1451
1452 case 'C':
1453 if (!pVCpuCall)
1454 pHlp->pfnPrintf(pHlp, "error: Can't check guest PT when not running on an EMT!\n");
1455 else if (pVCpu != pVCpuCall)
1456 pHlp->pfnPrintf(pHlp, "error: Can't check guest PTs when on a different EMT! Targeting %u, on %u.\n",
1457 pVCpu->idCpu, pVCpuCall->idCpu);
1458 else
1459 fFlags |= IEMR3INFOTLB_F_CHECK;
1460 break;
1461
1462 case 'a':
1463 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1464 ValueUnion.u64, fFlags, &fNeedHeader);
1465 fAddressMode = true;
1466 cActionArgs++;
1467 break;
1468
1469 case 'A':
1470 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1471 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1472 cActionArgs++;
1473 break;
1474
1475 case 'r':
1476 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1477 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, fFlags, &fNeedHeader);
1478 fAddressMode = false;
1479 cActionArgs++;
1480 break;
1481
1482 case 's':
1483 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1484 ValueUnion.u32, 1, fFlags, &fNeedHeader);
1485 fAddressMode = false;
1486 cActionArgs++;
1487 break;
1488
1489 case 'v':
1490 fFlags |= IEMR3INFOTLB_F_ONLY_VALID;
1491 break;
1492
1493 case VINF_GETOPT_NOT_OPTION:
1494 if (fAddressMode)
1495 {
1496 uint64_t uAddr;
1497 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
1498 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1499 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1500 uAddr, fFlags, &fNeedHeader);
1501 else
1502 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
1503 }
1504 else
1505 {
1506 uint32_t uSlot;
1507 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
1508 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1509 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1510 uSlot, 1, fFlags, &fNeedHeader);
1511 else
1512 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
1513 }
1514 cActionArgs++;
1515 break;
1516
1517 case 'h':
1518 pHlp->pfnPrintf(pHlp,
1519 "Usage: info %ctlb [options]\n"
1520 "\n"
1521 "Options:\n"
1522 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1523 "   Selects the CPU whose TLBs we're looking at. Default: Caller / 0\n"
1524 " -C,--check\n"
1525 " Check valid entries against guest PTs.\n"
1526 " -A, --all, all\n"
1527 " Display all the TLB entries (default if no other args).\n"
1528 " -a<virt>, --address=<virt>\n"
1529 " Shows the TLB entry for the specified guest virtual address.\n"
1530 " -r<slot:count>, --range=<slot:count>\n"
1531 " Shows the TLB entries for the specified slot range.\n"
1532 " -s<slot>,--slot=<slot>\n"
1533 " Shows the given TLB slot.\n"
1534 " -v,--only-valid\n"
1535 " Only show valid TLB entries (TAG, not phys)\n"
1536 "\n"
1537 "Non-options are interpreted according to the last -a, -r or -s option,\n"
1538 "defaulting to addresses if not preceded by any of those options.\n"
1539 , fITlb ? 'i' : 'd');
1540 return;
1541
1542 default:
1543 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1544 return;
1545 }
1546 }
1547
1548 /*
1549 * If no action taken, we display all (-A) by default.
1550 */
1551 if (!cActionArgs)
1552 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1553 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1554}
1555
1556
1557/**
1558 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
1559 */
1560static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1561{
1562 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
1563}
1564
1565
1566/**
1567 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
1568 */
1569static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1570{
1571 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
1572}
1573
1574
1575#ifdef IEM_WITH_TLB_TRACE
1576/**
1577 * @callback_method_impl{FNDBGFINFOARGVINT, tlbtrace}
1578 */
1579static DECLCALLBACK(void) iemR3InfoTlbTrace(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1580{
1581 /*
1582 * Parse arguments.
1583 */
1584 static RTGETOPTDEF const s_aOptions[] =
1585 {
1586 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1587 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1588 { "--last", 'l', RTGETOPT_REQ_UINT32 },
1589 { "--limit", 'l', RTGETOPT_REQ_UINT32 },
1590 { "--stop-at-global-flush", 'g', RTGETOPT_REQ_NOTHING },
1591 { "--resolve-rip", 'r', RTGETOPT_REQ_NOTHING },
1592 };
1593
1594 RTGETOPTSTATE State;
1595 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1596 AssertRCReturnVoid(rc);
1597
1598 uint32_t cLimit = UINT32_MAX;
1599 bool fStopAtGlobalFlush = false;
1600 bool fResolveRip = false;
1601 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1602 PVMCPU pVCpu = pVCpuCall;
1603 if (!pVCpu)
1604 pVCpu = VMMGetCpuById(pVM, 0);
1605
1606 RTGETOPTUNION ValueUnion;
1607 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1608 {
1609 switch (rc)
1610 {
1611 case 'c':
1612 if (ValueUnion.u32 >= pVM->cCpus)
1613 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1614 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1615 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1616 break;
1617
1618 case 'l':
1619 cLimit = ValueUnion.u32;
1620 break;
1621
1622 case 'g':
1623 fStopAtGlobalFlush = true;
1624 break;
1625
1626 case 'r':
1627 fResolveRip = true;
1628 break;
1629
1630 case 'h':
1631 pHlp->pfnPrintf(pHlp,
1632 "Usage: info tlbtrace [options] [n]\n"
1633 "\n"
1634 "Options:\n"
1635 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1636 "   Selects the CPU whose TLB trace we're looking at. Default: Caller / 0\n"
1637 " [n], -l<n>, --last=<n>\n"
1638 " Limit display to the last N entries. Default: all\n"
1639 " -g, --stop-at-global-flush\n"
1640 " Stop after the first global flush entry.\n"
1641 " -r, --resolve-rip\n"
1642 " Resolve symbols for the flattened RIP addresses.\n"
1643 );
1644 return;
1645
1646 case VINF_GETOPT_NOT_OPTION:
1647 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cLimit);
1648 if (RT_SUCCESS(rc))
1649 break;
1650 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
1651 return;
1652
1653 default:
1654 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1655 return;
1656 }
1657 }
1658
1659 /*
1660 * Get the details.
1661 */
1662 AssertReturnVoid(pVCpu);
1663 Assert(pVCpu->iem.s.cTlbTraceEntriesShift <= 28);
1664 uint32_t idx = pVCpu->iem.s.idxTlbTraceEntry;
1665 uint32_t const cShift = RT_MIN(pVCpu->iem.s.cTlbTraceEntriesShift, 28);
1666 uint32_t const fMask = RT_BIT_32(cShift) - 1;
1667 uint32_t cLeft = RT_MIN(RT_MIN(idx, RT_BIT_32(cShift)), cLimit);
1668 PCIEMTLBTRACEENTRY paEntries = pVCpu->iem.s.paTlbTraceEntries;
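    /* The trace buffer is a power-of-two ring: idxTlbTraceEntry only ever increments, so the
       newest entry lives at (idx - 1) & fMask and the loop below walks backwards from there. */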
1669 if (cLeft && paEntries)
1670 {
1671 /*
1672 * Display the entries.
1673 */
1674 pHlp->pfnPrintf(pHlp, "TLB Trace for CPU %u:\n", pVCpu->idCpu);
1675 while (cLeft-- > 0)
1676 {
1677 PCIEMTLBTRACEENTRY const pCur = &paEntries[--idx & fMask];
1678 const char *pszSymbol = "";
1679 union
1680 {
1681 RTDBGSYMBOL Symbol;
1682 char ach[sizeof(RTDBGSYMBOL) + 32];
1683 } uBuf;
1684 if (fResolveRip)
1685 {
1686 RTGCINTPTR offDisp = 0;
1687 DBGFADDRESS Addr;
1688 rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, pCur->rip),
1689 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL
1690 | RTDBGSYMADDR_FLAGS_SKIP_ABS
1691 | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1692 &offDisp, &uBuf.Symbol, NULL);
1693 if (RT_SUCCESS(rc))
1694 {
1695 /* Add displacement. */
1696 if (offDisp)
1697 {
1698 size_t const cchName = strlen(uBuf.Symbol.szName);
1699 char * const pszEndName = &uBuf.Symbol.szName[cchName];
1700 size_t const cbLeft = sizeof(uBuf) - sizeof(uBuf.Symbol) + sizeof(uBuf.Symbol.szName) - cchName;
1701 if (offDisp > 0)
1702 RTStrPrintf(pszEndName, cbLeft, "+%#1RGv", offDisp);
1703 else
1704 RTStrPrintf(pszEndName, cbLeft, "-%#1RGv", -offDisp);
1705 }
1706
1707 /* Put a space before it. */
1708 AssertCompile(RTASSERT_OFFSET_OF(RTDBGSYMBOL, szName) > 0);
1709 char *pszName = uBuf.Symbol.szName;
1710 *--pszName = ' ';
1711 pszSymbol = pszName;
1712 }
1713 }
1714 static const char *s_apszTlbType[2] = { "code", "data" };
1715 static const char *s_apszScanType[4] = { "skipped", "global", "non-global", "both" };
1716 switch (pCur->enmType)
1717 {
1718 case kIemTlbTraceType_InvlPg:
1719 pHlp->pfnPrintf(pHlp, "%u: %016RX64 invlpg %RGv slot=" IEMTLB_SLOT_FMT "%s\n", idx, pCur->rip,
1720 pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param), pszSymbol);
1721 break;
1722 case kIemTlbTraceType_EvictSlot:
1723 pHlp->pfnPrintf(pHlp, "%u: %016RX64 evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1724 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1725 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1726 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1727 pCur->u64Param2, pszSymbol);
1728 break;
1729 case kIemTlbTraceType_LargeEvictSlot:
1730 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1731 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1732 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1733 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1734 pCur->u64Param2, pszSymbol);
1735 break;
1736 case kIemTlbTraceType_LargeScan:
1737 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large scan %s %s%s\n", idx, pCur->rip, s_apszTlbType[pCur->bParam & 1],
1738 s_apszScanType[pCur->u32Param & 3], pszSymbol);
1739 break;
1740
1741 case kIemTlbTraceType_Flush:
1742 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64%s\n", idx, pCur->rip,
1743 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pszSymbol);
1744 break;
1745 case kIemTlbTraceType_FlushGlobal:
1746 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64 grev=%#RX64%s\n", idx, pCur->rip,
1747 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pCur->u64Param2, pszSymbol);
1748 if (fStopAtGlobalFlush)
1749 return;
1750 break;
1751 case kIemTlbTraceType_Load:
1752 case kIemTlbTraceType_LoadGlobal:
1753 pHlp->pfnPrintf(pHlp, "%u: %016RX64 %cload %s %RGv slot=" IEMTLB_SLOT_FMT " gcphys=%RGp fTlb=%#RX32%s\n",
1754 idx, pCur->rip,
1755 pCur->enmType == kIemTlbTraceType_LoadGlobal ? 'g' : 'l', s_apszTlbType[pCur->bParam & 1],
1756 pCur->u64Param,
1757 (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param)
1758 | (pCur->enmType == kIemTlbTraceType_LoadGlobal),
1759 (RTGCPTR)pCur->u64Param2, pCur->u32Param, pszSymbol);
1760 break;
1761
1762 case kIemTlbTraceType_Load_Cr0:
1763 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr0 %08RX64 (was %08RX64)%s\n",
1764 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1765 break;
1766 case kIemTlbTraceType_Load_Cr3:
1767 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr3 %016RX64 (was %016RX64)%s\n",
1768 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1769 break;
1770 case kIemTlbTraceType_Load_Cr4:
1771 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr4 %08RX64 (was %08RX64)%s\n",
1772 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1773 break;
1774 case kIemTlbTraceType_Load_Efer:
1775 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load efer %016RX64 (was %016RX64)%s\n",
1776 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1777 break;
1778
1779 case kIemTlbTraceType_Irq:
1780 pHlp->pfnPrintf(pHlp, "%u: %016RX64 irq %#04x flags=%#x eflboth=%#RX64%s\n",
1781 idx, pCur->rip, pCur->bParam, pCur->u32Param,
1782 pCur->u64Param & ((RT_BIT_64(CPUMX86EFLAGS_HW_BITS) - 1) | CPUMX86EFLAGS_INT_MASK_64),
1783 pszSymbol);
1784 break;
1785 case kIemTlbTraceType_Xcpt:
1786 if (pCur->u32Param & IEM_XCPT_FLAGS_CR2)
1787 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x cr2=%RX64%s\n",
1788 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pCur->u64Param2, pszSymbol);
1789 else if (pCur->u32Param & IEM_XCPT_FLAGS_ERR)
1790 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x%s\n",
1791 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pszSymbol);
1792 else
1793 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x%s\n",
1794 idx, pCur->rip, pCur->bParam, pCur->u32Param, pszSymbol);
1795 break;
1796 case kIemTlbTraceType_IRet:
1797 pHlp->pfnPrintf(pHlp, "%u: %016RX64 iret cs:rip=%04x:%016RX64 efl=%08RX32%s\n",
1798 idx, pCur->rip, pCur->u32Param, pCur->u64Param, (uint32_t)pCur->u64Param2, pszSymbol);
1799 break;
1800
1801 case kIemTlbTraceType_Tb_Compile:
1802 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb comp GCPhysPc=%012RX64%s\n",
1803 idx, pCur->rip, pCur->u64Param, pszSymbol);
1804 break;
1805 case kIemTlbTraceType_Tb_Exec_Threaded:
1806 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb thrd GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1807 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1808 break;
1809 case kIemTlbTraceType_Tb_Exec_Native:
1810 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb n8ve GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1811 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1812 break;
1813
1814 case kIemTlbTraceType_User0:
1815 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user0 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1816 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1817 break;
1818 case kIemTlbTraceType_User1:
1819 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user1 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1820 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1821 break;
1822 case kIemTlbTraceType_User2:
1823 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user2 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1824 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1825 break;
1826 case kIemTlbTraceType_User3:
1827 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user3 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1828 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1829 break;
1830
1831 case kIemTlbTraceType_Invalid:
1832 pHlp->pfnPrintf(pHlp, "%u: Invalid!\n", idx);
1833 break;
1834 }
1835 }
1836 }
1837 else
1838 pHlp->pfnPrintf(pHlp, "No trace entries to display\n");
1839}
1840#endif /* IEM_WITH_TLB_TRACE */
1841
1842#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1843
1844/**
1845  * Gets the compile-time flat PC for the TB.
1846 */
1847DECL_FORCE_INLINE(RTGCPTR) iemR3GetTbFlatPc(PCIEMTB pTb)
1848{
1849#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
1850 if (pTb->fFlags & IEMTB_F_TYPE_NATIVE)
1851 {
1852 PCIEMTBDBG const pDbgInfo = pTb->pDbgInfo;
1853 return pDbgInfo ? pDbgInfo->FlatPc : RTGCPTR_MAX;
1854 }
1855#endif
1856 return pTb->FlatPc;
1857}
1858
1859
1860/**
1861 * @callback_method_impl{FNDBGFINFOARGVINT, tb}
1862 */
1863static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1864{
1865 /*
1866 * Parse arguments.
1867 */
1868 static RTGETOPTDEF const s_aOptions[] =
1869 {
1870 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1871 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1872 { "--addr", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1873 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1874 { "--phys", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1875 { "--physical", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1876 { "--phys-addr", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1877 { "--phys-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1878 { "--physical-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1879 { "--flags", 'f', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1880 { "--tb", 't', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1881 { "--tb-id", 't', RTGETOPT_REQ_UINT32 },
1882 };
1883
1884 RTGETOPTSTATE State;
1885 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1886 AssertRCReturnVoid(rc);
1887
1888 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
1889 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
1890 RTGCPHYS GCPhysPc = NIL_RTGCPHYS;
1891 RTGCPTR GCVirt = NIL_RTGCPTR;
1892 uint32_t fFlags = UINT32_MAX;
1893 uint32_t idTb = UINT32_MAX;
1894
1895 RTGETOPTUNION ValueUnion;
1896 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1897 {
1898 switch (rc)
1899 {
1900 case 'c':
1901 if (ValueUnion.u32 >= pVM->cCpus)
1902 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1903 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1904 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1905 break;
1906
1907 case 'a':
1908 GCVirt = ValueUnion.u64;
1909 GCPhysPc = NIL_RTGCPHYS;
1910 idTb = UINT32_MAX;
1911 break;
1912
1913 case 'p':
1914 GCVirt = NIL_RTGCPTR;
1915 GCPhysPc = ValueUnion.u64;
1916 idTb = UINT32_MAX;
1917 break;
1918
1919 case 'f':
1920 fFlags = ValueUnion.u32;
1921 break;
1922
1923 case 't':
1924 GCVirt = NIL_RTGCPTR;
1925 GCPhysPc = NIL_RTGCPHYS;
1926 idTb = ValueUnion.u32;
1927 break;
1928
1929 case VINF_GETOPT_NOT_OPTION:
1930 {
1931 if ( (ValueUnion.psz[0] == 'T' || ValueUnion.psz[0] == 't')
1932 && (ValueUnion.psz[1] == 'B' || ValueUnion.psz[1] == 'b')
1933 && ValueUnion.psz[2] == '#')
1934 {
1935 rc = RTStrToUInt32Full(&ValueUnion.psz[3], 0, &idTb);
1936 if (RT_SUCCESS(rc))
1937 {
1938 GCVirt = NIL_RTGCPTR;
1939 GCPhysPc = NIL_RTGCPHYS;
1940 break;
1941 }
1942 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to TB ID: %Rrc\n", ValueUnion.psz, rc);
1943 }
1944 else
1945 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1946 return;
1947 }
1948
1949 case 'h':
1950 pHlp->pfnPrintf(pHlp,
1951 "Usage: info tb [options]\n"
1952 "\n"
1953 "Options:\n"
1954 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1955 "   Selects the CPU whose TBs we're looking at. Default: Caller / 0\n"
1956 " -a<virt>, --address=<virt>\n"
1957 " Shows the TB for the specified guest virtual address.\n"
1958 " -p<phys>, --phys=<phys>, --phys-addr=<phys>\n"
1959 " Shows the TB for the specified guest physical address.\n"
1960 " -t<id>, --tb=<id>, --tb-id=<id>, TB#<id>\n"
1961 " Show the TB specified by the identifier/number (from tbtop).\n"
1962 " -f<flags>,--flags=<flags>\n"
1963 " The TB flags value (hex) to use when looking up the TB.\n"
1964 "\n"
1965 "The default is to use CS:RIP and derive flags from the CPU mode.\n");
1966 return;
1967
1968 default:
1969 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1970 return;
1971 }
1972 }
1973
1974 /* Currently, only do work on the same EMT. */
1975 if (pVCpu != pVCpuThis)
1976 {
1977 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
1978 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
1979 return;
1980 }
1981
1982 /*
1983 * Defaults.
1984 */
1985 if (GCPhysPc == NIL_RTGCPHYS && idTb == UINT32_MAX)
1986 {
1987 if (GCVirt == NIL_RTGCPTR)
1988 GCVirt = CPUMGetGuestFlatPC(pVCpu);
1989 rc = PGMPhysGCPtr2GCPhys(pVCpu, GCVirt, &GCPhysPc);
1990 if (RT_FAILURE(rc))
1991 {
 1992 pHlp->pfnPrintf(pHlp, "Failed to convert %%%RGv to a guest physical address: %Rrc\n", GCVirt, rc);
1993 return;
1994 }
1995 }
1996 if (fFlags == UINT32_MAX && idTb == UINT32_MAX)
1997 {
1998 /* Note! This is duplicating code in IEMAllThrdRecompiler. */
1999 fFlags = iemCalcExecFlags(pVCpu);
2000 if (pVM->cCpus == 1)
2001 fFlags |= IEM_F_X86_DISREGARD_LOCK;
2002 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
2003 fFlags |= IEMTB_F_INHIBIT_SHADOW;
2004 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
2005 fFlags |= IEMTB_F_INHIBIT_NMI;
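 /* Outside 64-bit mode, request CS limit checks when EIP is within roughly a page (+16 bytes) of the CS limit, mirroring the recompiler code this duplicates. */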
2006 if ((IEM_F_MODE_CPUMODE_MASK & fFlags) != IEMMODE_64BIT)
2007 {
2008 int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
2009 if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
2010 fFlags |= IEMTB_F_CS_LIM_CHECKS;
2011 }
2012 }
2013
2014 PCIEMTB pTb;
2015 if (idTb == UINT32_MAX)
2016 {
2017 /*
2018 * Do the lookup...
2019 *
2020 * Note! This is also duplicating code in IEMAllThrdRecompiler. We don't
2021 * have much choice since we don't want to increase use counters and
2022 * trigger native recompilation.
2023 */
2024 fFlags &= IEMTB_F_KEY_MASK;
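 /* The TB cache is hashed on the masked flags and the physical PC; colliding entries are chained via pNext, so the whole bucket is walked below. */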
2025 IEMTBCACHE const * const pTbCache = pVCpu->iem.s.pTbCacheR3;
2026 uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
2027 pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
2028 while (pTb)
2029 {
2030 if (pTb->GCPhysPc == GCPhysPc)
2031 {
2032 if ((pTb->fFlags & IEMTB_F_KEY_MASK) == fFlags)
2033 {
2034 /// @todo if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
2035 break;
2036 }
2037 }
2038 pTb = pTb->pNext;
2039 }
2040 if (!pTb)
2041 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x - no TB found on #%u\n", GCPhysPc, fFlags, pVCpu->idCpu);
2042 }
2043 else
2044 {
2045 /*
2046 * Use the TB ID for indexing.
2047 */
2048 pTb = NULL;
2049 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2050 if (pTbAllocator)
2051 {
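 /* TB IDs map linearly onto the chunked allocator: chunk index = id / TBs-per-chunk, slot within the chunk = the remainder. */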
2052 size_t const idxTbChunk = idTb / pTbAllocator->cTbsPerChunk;
2053 size_t const idxTbInChunk = idTb % pTbAllocator->cTbsPerChunk;
2054 if (idxTbChunk < pTbAllocator->cAllocatedChunks)
2055 pTb = &pTbAllocator->aChunks[idxTbChunk].paTbs[idxTbInChunk];
2056 else
2057 pHlp->pfnPrintf(pHlp, "Invalid TB ID: %u (%#x)\n", idTb, idTb);
2058 }
2059 }
2060
2061 if (pTb)
2062 {
2063 /*
2064 * Disassemble according to type.
2065 */
2066 size_t const idxTbChunk = pTb->idxAllocChunk;
2067 size_t const idxTbNo = (pTb - &pVCpu->iem.s.pTbAllocatorR3->aChunks[idxTbChunk].paTbs[0])
2068 + idxTbChunk * pVCpu->iem.s.pTbAllocatorR3->cTbsPerChunk;
2069 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2070 {
2071# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2072 case IEMTB_F_TYPE_NATIVE:
2073 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - native\n",
2074 GCPhysPc, iemR3GetTbFlatPc(pTb), fFlags, pVCpu->idCpu, idxTbNo, pTb);
2075 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2076 break;
2077# endif
2078
2079 case IEMTB_F_TYPE_THREADED:
2080 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - threaded\n",
2081 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb);
2082 iemThreadedDisassembleTb(pTb, pHlp);
2083 break;
2084
2085 default:
2086 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - ??? %#x\n",
2087 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb, pTb->fFlags);
2088 break;
2089 }
2090 }
2091}
2092
2093
2094/**
2095 * @callback_method_impl{FNDBGFINFOARGVINT, tbtop}
2096 */
2097static DECLCALLBACK(void) iemR3InfoTbTop(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
2098{
2099 /*
2100 * Parse arguments.
2101 */
2102 static RTGETOPTDEF const s_aOptions[] =
2103 {
2104 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
2105 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
2106 { "--dis", 'd', RTGETOPT_REQ_NOTHING },
2107 { "--disas", 'd', RTGETOPT_REQ_NOTHING },
2108 { "--disasm", 'd', RTGETOPT_REQ_NOTHING },
2109 { "--disassemble", 'd', RTGETOPT_REQ_NOTHING },
2110 { "--no-dis", 'D', RTGETOPT_REQ_NOTHING },
2111 { "--no-disas", 'D', RTGETOPT_REQ_NOTHING },
2112 { "--no-disasm", 'D', RTGETOPT_REQ_NOTHING },
2113 { "--no-disassemble", 'D', RTGETOPT_REQ_NOTHING },
2114 { "--most-freq", 'f', RTGETOPT_REQ_NOTHING },
2115 { "--most-frequent", 'f', RTGETOPT_REQ_NOTHING },
2116 { "--most-frequently", 'f', RTGETOPT_REQ_NOTHING },
2117 { "--most-frequently-used", 'f', RTGETOPT_REQ_NOTHING },
2118 { "--most-recent", 'r', RTGETOPT_REQ_NOTHING },
2119 { "--most-recently", 'r', RTGETOPT_REQ_NOTHING },
2120 { "--most-recently-used", 'r', RTGETOPT_REQ_NOTHING },
2121 { "--count", 'n', RTGETOPT_REQ_UINT32 },
2122 };
2123
2124 RTGETOPTSTATE State;
2125 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
2126 AssertRCReturnVoid(rc);
2127
2128 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
2129 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
 2130 enum { kTbTop_MostFrequentlyUsed, kTbTop_MostRecentlyUsed }
2131 enmTop = kTbTop_MostFrequentlyUsed;
2132 bool fDisassemble = false;
2133 uint32_t const cTopDefault = 64;
2134 uint32_t const cTopMin = 1;
2135 uint32_t const cTopMax = 1024;
2136 uint32_t cTop = cTopDefault;
2137
2138 RTGETOPTUNION ValueUnion;
2139 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
2140 {
2141 switch (rc)
2142 {
2143 case 'c':
2144 if (ValueUnion.u32 >= pVM->cCpus)
2145 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
2146 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
2147 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
2148 break;
2149
2150 case 'd':
2151 fDisassemble = true;
2152 break;
2153
2154 case 'D':
 2155 fDisassemble = false;
2156 break;
2157
2158 case 'f':
2159 enmTop = kTbTop_MostFrequentlyUsed;
2160 break;
2161
2162 case 'r':
 2163 enmTop = kTbTop_MostRecentlyUsed;
2164 break;
2165
2166 case VINF_GETOPT_NOT_OPTION:
2167 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cTop);
2168 if (RT_FAILURE(rc))
2169 {
2170 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
2171 return;
2172 }
2173 ValueUnion.u32 = cTop;
2174 RT_FALL_THROUGH();
2175 case 'n':
2176 if (!ValueUnion.u32)
2177 cTop = cTopDefault;
2178 else
2179 {
2180 cTop = RT_MAX(RT_MIN(ValueUnion.u32, cTopMax), cTopMin);
2181 if (cTop != ValueUnion.u32)
 2182 pHlp->pfnPrintf(pHlp, "warning: adjusted %u to %u (valid range: [%u..%u], 0 for default (%d))\n",
2183 ValueUnion.u32, cTop, cTopMin, cTopMax, cTopDefault);
2184 }
2185 break;
2186
2187 case 'h':
2188 pHlp->pfnPrintf(pHlp,
2189 "Usage: info tbtop [options]\n"
2190 "\n"
2191 "Options:\n"
2192 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
2193 " Selects the CPU which TBs we're looking at. Default: Caller / 0\n"
2194 " -d, --dis[as[m]], --disassemble\n"
2195 " Show full TB disassembly.\n"
2196 " -D, --no-dis[as[m]], --no-disassemble\n"
2197 " Do not show TB diassembly. The default.\n"
2198 " -f, --most-freq[ent[ly[-used]]]\n"
2199 " Shows the most frequently used TBs (IEMTB::cUsed). The default.\n"
2200 " -r, --most-recent[ly[-used]]\n"
2201 " Shows the most recently used TBs (IEMTB::msLastUsed).\n"
2202 " -n<num>, --count=<num>\n"
2203 " The number of TBs to display. Default: %u\n"
2204 " This is also what non-option arguments will be taken as.\n"
2205 , cTopDefault);
2206 return;
2207
2208 default:
2209 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
2210 return;
2211 }
2212 }
2213
2214 /* Currently, only do work on the same EMT. */
2215 if (pVCpu != pVCpuThis)
2216 {
2217 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
2218 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
2219 return;
2220 }
2221
2222 /*
2223 * Collect the data by scanning the TB allocation map.
2224 */
2225 struct IEMTBTOPENTRY
2226 {
2227 /** Pointer to the translation block. */
2228 PCIEMTB pTb;
2229 /** The sorting key. */
2230 uint64_t uSortKey;
2231 } aTop[cTopMax] = { { NULL, 0 }, };
2232 uint32_t cValid = 0;
2233 PIEMTBALLOCATOR pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2234 if (pTbAllocator)
2235 {
2236 uint32_t const cTbsPerChunk = pTbAllocator->cTbsPerChunk;
2237 for (uint32_t iChunk = 0; iChunk < pTbAllocator->cAllocatedChunks; iChunk++)
2238 {
2239 for (uint32_t iTb = 0; iTb < cTbsPerChunk; iTb++)
2240 {
2241 PCIEMTB const pTb = &pTbAllocator->aChunks[iChunk].paTbs[iTb];
2242 AssertContinue(pTb);
2243 if (pTb->fFlags & IEMTB_F_TYPE_MASK)
2244 {
2245 /* Extract and compose the sort key. */
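 /* RT_MAKE_U64 takes the low dword first, so the primary sort criterion lands in the high 32 bits and the other value acts as tie-breaker. */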
2246 uint64_t const uSortKey = enmTop == kTbTop_MostFrequentlyUsed
2247 ? RT_MAKE_U64(pTb->msLastUsed, pTb->cUsed)
2248 : RT_MAKE_U64(pTb->cUsed, pTb->msLastUsed);
2249
2250 /*
2251 * Discard the key if it's smaller than the smallest in the table when it is full.
2252 */
2253 if ( cValid >= cTop
2254 && uSortKey <= aTop[cTop - 1].uSortKey)
2255 { /* discard it */ }
2256 else
2257 {
2258 /*
2259 * Do binary search to find the insert location
2260 */
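 /* aTop[] is kept sorted in descending key order; locate the first entry with a smaller key (equal keys retain insertion order). */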
2261 uint32_t idx;
2262 if (cValid > 0)
2263 {
2264 uint32_t idxEnd = cValid;
2265 uint32_t idxStart = 0;
2266 idx = cValid / 2;
2267 for (;;)
2268 {
2269 if (uSortKey > aTop[idx].uSortKey)
2270 {
2271 if (idx > idxStart)
2272 idxEnd = idx;
2273 else
2274 break;
2275 }
2276 else if (uSortKey < aTop[idx].uSortKey)
2277 {
2278 idx += 1;
2279 if (idx < idxEnd)
2280 idxStart = idx;
2281 else
2282 break;
2283 }
2284 else
2285 {
2286 do
2287 idx++;
2288 while (idx < cValid && uSortKey == aTop[idx].uSortKey);
2289 break;
2290 }
2291 idx = idxStart + (idxEnd - idxStart) / 2;
2292 }
2293 AssertContinue(idx < RT_ELEMENTS(aTop));
2294
2295 /*
2296 * Shift entries as needed.
2297 */
2298 if (cValid >= cTop)
2299 {
2300 if (idx != cTop - 1U)
2301 memmove(&aTop[idx + 1], &aTop[idx], (cTop - idx - 1) * sizeof(aTop[0]));
2302 }
2303 else
2304 {
2305 if (idx != cValid)
2306 memmove(&aTop[idx + 1], &aTop[idx], (cValid - idx) * sizeof(aTop[0]));
2307 cValid++;
2308 }
2309 }
2310 else
2311 {
2312 /* Special case: The first insertion. */
2313 cValid = 1;
2314 idx = 0;
2315 }
2316
2317 /*
2318 * Fill in the new entry.
2319 */
2320 aTop[idx].uSortKey = uSortKey;
2321 aTop[idx].pTb = pTb;
2322 }
2323 }
2324 }
2325 }
2326 }
2327
2328 /*
2329 * Display the result.
2330 */
2331 if (cTop > cValid)
2332 cTop = cValid;
2333 pHlp->pfnPrintf(pHlp, "Displaying the top %u TBs for CPU #%u ordered by %s:\n",
2334 cTop, pVCpu->idCpu, enmTop == kTbTop_MostFrequentlyUsed ? "cUsed" : "msLastUsed");
2335 if (fDisassemble)
2336 pHlp->pfnPrintf(pHlp, "================================================================================\n");
2337
2338 for (uint32_t idx = 0; idx < cTop; idx++)
2339 {
2340 if (fDisassemble && idx)
2341 pHlp->pfnPrintf(pHlp, "\n------------------------------- %u -------------------------------\n", idx);
2342
2343 PCIEMTB const pTb = aTop[idx].pTb;
2344 size_t const idxTbChunk = pTb->idxAllocChunk;
2345 Assert(idxTbChunk < pTbAllocator->cAllocatedChunks);
2346 size_t const idxTbNo = (pTb - &pTbAllocator->aChunks[idxTbChunk].paTbs[0])
2347 + idxTbChunk * pTbAllocator->cTbsPerChunk;
2348 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2349 {
2350# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2351 case IEMTB_F_TYPE_NATIVE:
2352 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - native\n",
2353 idxTbNo, pTb->GCPhysPc, iemR3GetTbFlatPc(pTb), pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2354 if (fDisassemble)
2355 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2356 break;
2357# endif
2358
2359 case IEMTB_F_TYPE_THREADED:
2360 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - threaded\n",
2361 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2362 if (fDisassemble)
2363 iemThreadedDisassembleTb(pTb, pHlp);
2364 break;
2365
2366 default:
2367 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - ???\n",
2368 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2369 break;
2370 }
2371 }
2372}
2373
2374#endif /* VBOX_WITH_IEM_RECOMPILER && !VBOX_VMM_TARGET_ARMV8 */
2375
2376
2377#ifdef VBOX_WITH_DEBUGGER
2378
2379/** @callback_method_impl{FNDBGCCMD,
 2380 * Implements the 'iemflushtlb' command. }
2381 */
2382static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2383{
2384 VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
2385 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
2386 if (pVCpu)
2387 {
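 /* Hand the flush to the target EMT as a priority request and wait for it to complete. */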
2388 VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAllGlobal, 1, pVCpu);
2389 return VINF_SUCCESS;
2390 }
2391 RT_NOREF(paArgs, cArgs);
2392 return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
2393}
2394
2395
2396/**
2397 * Called by IEMR3Init to register debugger commands.
2398 */
2399static void iemR3RegisterDebuggerCommands(void)
2400{
2401 /*
2402 * Register debugger commands.
2403 */
2404 static DBGCCMD const s_aCmds[] =
2405 {
2406 {
2407 /* .pszCmd = */ "iemflushtlb",
2408 /* .cArgsMin = */ 0,
2409 /* .cArgsMax = */ 0,
2410 /* .paArgDescs = */ NULL,
2411 /* .cArgDescs = */ 0,
2412 /* .fFlags = */ 0,
2413 /* .pfnHandler = */ iemR3DbgFlushTlbs,
2414 /* .pszSyntax = */ "",
 2415 /* .pszDescription = */ "Flushes the code and data TLBs"
2416 },
2417 };
2418
2419 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
2420 AssertLogRelRC(rc);
2421}
2422
2423#endif /* VBOX_WITH_DEBUGGER */
2424