VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp@ 104051

Last change on this file since 104051 was 103964, checked in by vboxsync, 11 months ago

VMM/IEM: Some statistics on the SIMD register allocator and some fixes, bugref:10614

1/* $Id: IEMR3.cpp 103964 2024-03-20 15:01:46Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/dbgf.h>
37#include <VBox/vmm/mm.h>
38#if defined(VBOX_VMM_TARGET_ARMV8)
39# include "IEMInternal-armv8.h"
40#else
41# include "IEMInternal.h"
42#endif
43#include <VBox/vmm/vm.h>
44#include <VBox/vmm/vmapi.h>
45#include <VBox/err.h>
46#ifdef VBOX_WITH_DEBUGGER
47# include <VBox/dbg.h>
48#endif
49
50#include <iprt/assert.h>
51#include <iprt/getopt.h>
52#include <iprt/string.h>
53
54#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
55# include "IEMN8veRecompiler.h"
56# include "IEMThreadedFunctions.h"
57# include "IEMInline.h"
58#endif
59
60
61/*********************************************************************************************************************************
62* Internal Functions *
63*********************************************************************************************************************************/
64static FNDBGFINFOARGVINT iemR3InfoITlb;
65static FNDBGFINFOARGVINT iemR3InfoDTlb;
66#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
67static FNDBGFINFOARGVINT iemR3InfoTb;
68#endif
69#ifdef VBOX_WITH_DEBUGGER
70static void iemR3RegisterDebuggerCommands(void);
71#endif
72
73
74#if !defined(VBOX_VMM_TARGET_ARMV8)
75static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
76{
77 switch (enmTargetCpu)
78 {
79#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
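    /* Note: the '+ (sizeof("IEMTARGETCPU_") - 1)' skips the common prefix of the
       stringified enum name, so e.g. IEMTARGETCPU_486 comes back as "486". */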
80 CASE_RET_STR(IEMTARGETCPU_8086);
81 CASE_RET_STR(IEMTARGETCPU_V20);
82 CASE_RET_STR(IEMTARGETCPU_186);
83 CASE_RET_STR(IEMTARGETCPU_286);
84 CASE_RET_STR(IEMTARGETCPU_386);
85 CASE_RET_STR(IEMTARGETCPU_486);
86 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
87 CASE_RET_STR(IEMTARGETCPU_PPRO);
88 CASE_RET_STR(IEMTARGETCPU_CURRENT);
89#undef CASE_RET_STR
90 default: return "Unknown";
91 }
92}
93#endif
94
95
96/**
97 * Initializes the interpreted execution manager.
98 *
99 * This must be called after CPUM as we're querying information from CPUM about
100 * the guest and host CPUs.
101 *
102 * @returns VBox status code.
103 * @param pVM The cross context VM structure.
104 */
105VMMR3DECL(int) IEMR3Init(PVM pVM)
106{
107 /*
108 * Read configuration.
109 */
110#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
111 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
112 int rc;
113#endif
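    /* All keys queried below live under the /IEM node of the CFGM tree.  On a
       regular installation they can presumably be overridden from the host side
       via the extra-data mechanism (e.g. VBoxManage setextradata <vm>
       "VBoxInternal/IEM/MaxTbCount" 262144), but that is outside this file. */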
114
115#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
116 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
117 * Controls whether the custom VBox specific CPUID host call interface is
118 * enabled or not. */
119# ifdef DEBUG_bird
120 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
121# else
122 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
123# endif
124 AssertLogRelRCReturn(rc, rc);
125#endif
126
127#ifdef VBOX_WITH_IEM_RECOMPILER
128 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
129 * Max number of TBs per EMT. */
130 uint32_t cMaxTbs = 0;
131 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
132 AssertLogRelRCReturn(rc, rc);
133 if (cMaxTbs < _16K || cMaxTbs > _8M)
134 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
135 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
136
137 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32768}
138 * Initial (minimum) number of TBs per EMT in ring-3. */
139 uint32_t cInitialTbs = 0;
140 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
141 AssertLogRelRCReturn(rc, rc);
142 if (cInitialTbs < _16K || cInitialTbs > _8M)
143 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
144 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
145
146 /* Check that the two values make sense together. Expect user/api to do
147 the right thing or get lost. */
148 if (cInitialTbs > cMaxTbs)
149 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
150 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
151 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
152
153 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
154 * Max executable memory for recompiled code per EMT. */
155 uint64_t cbMaxExec = 0;
156 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
157 AssertLogRelRCReturn(rc, rc);
158 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
159 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
160 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
161 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
162
163 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
164 * The executable memory allocator chunk size. */
165 uint32_t cbChunkExec = 0;
166 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
167 AssertLogRelRCReturn(rc, rc);
168 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
169 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
170 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
171 cbChunkExec, cbChunkExec, _1M, _256M);
172
173 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
174 * The initial executable memory allocator size (per EMT). The value is
175 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
176 uint64_t cbInitialExec = 0;
177 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
178 AssertLogRelRCReturn(rc, rc);
179 if (cbInitialExec > cbMaxExec)
180 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
181 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
182 cbInitialExec, cbInitialExec, cbMaxExec);
183
184#endif /* VBOX_WITH_IEM_RECOMPILER*/
185
186 /*
187 * Initialize per-CPU data and register statistics.
188 */
189 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
190 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
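    /* Note: starting the revisions a couple of hundred increments below zero
       (i.e. near the top of the 64-bit range) presumably makes the revision
       roll-over paths run early in the VM's life rather than only after very
       long uptimes. */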
191
192 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
193 {
194 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
195 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
196
197 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
198 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
199
200 /*
201 * Host and guest CPU information.
202 */
203 if (idCpu == 0)
204 {
205 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
206 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
207#if !defined(VBOX_VMM_TARGET_ARMV8)
208 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
209 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
210 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
211# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
212 if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
213 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
214 else
215# endif
216 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
217#else
218 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
219 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
220#endif
221
222#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
223 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
224 {
225 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
226 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
227 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
228 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
229 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
230 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
231 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
232 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
233 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
234 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
235 }
236 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
237 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
238 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
239#else
240 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
241 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
242 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
243#endif
244 }
245 else
246 {
247 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
248 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor;
249 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
250 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
251#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
252 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
253#endif
254 }
255
256 /*
257 * Mark all buffers free.
258 */
259 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
260 while (iMemMap-- > 0)
261 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
262 }
263
264
265#ifdef VBOX_WITH_IEM_RECOMPILER
266 /*
267 * Initialize the TB allocator and cache (/ hash table).
268 *
269 * This is done by each EMT to try get more optimal thread/numa locality of
270 * the allocations.
271 */
272 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
273 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
274 AssertLogRelRCReturn(rc, rc);
275#endif
276
277 /*
278 * Register statistics.
279 */
280 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
281 {
282#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
283 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
284
285 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
286 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
287 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
288 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
289 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
290 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
291 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
292 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
293 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
294 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
295 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
296 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
297 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
298 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
299 STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
300 "Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
301 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
302 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
303 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
304 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
305
306 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbMisses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
307 "Code TLB misses", "/IEM/CPU%u/CodeTlb-Misses", idCpu);
308 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
309 "Code TLB revision", "/IEM/CPU%u/CodeTlb-Revision", idCpu);
310 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
311 "Code TLB physical revision", "/IEM/CPU%u/CodeTlb-PhysRev", idCpu);
312 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
313 "Code TLB slow read path", "/IEM/CPU%u/CodeTlb-SlowReads", idCpu);
314
315 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbMisses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
316 "Data TLB misses", "/IEM/CPU%u/DataTlb-Misses", idCpu);
317 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
318 "Data TLB safe read path", "/IEM/CPU%u/DataTlb-SafeReads", idCpu);
319 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
320 "Data TLB safe write path", "/IEM/CPU%u/DataTlb-SafeWrites", idCpu);
321 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
322 "Data TLB revision", "/IEM/CPU%u/DataTlb-Revision", idCpu);
323 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
324 "Data TLB physical revision", "/IEM/CPU%u/DataTlb-PhysRev", idCpu);
325
326# ifdef VBOX_WITH_STATISTICS
327 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
328 "Code TLB hits", "/IEM/CPU%u/CodeTlb-Hits", idCpu);
329 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
330 "Data TLB hits", "/IEM/CPU%u/DataTlb-Hits-Other", idCpu);
331# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
332 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
333 "Data TLB native stack access hits", "/IEM/CPU%u/DataTlb-Hits-Native-Stack", idCpu);
334 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
335 "Data TLB native data fetch hits", "/IEM/CPU%u/DataTlb-Hits-Native-Fetch", idCpu);
336 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
337 "Data TLB native data store hits", "/IEM/CPU%u/DataTlb-Hits-Native-Store", idCpu);
338 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
339 "Data TLB native mapped data hits", "/IEM/CPU%u/DataTlb-Hits-Native-Mapped", idCpu);
340# endif
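                /* The two helpers below register derived statistics: STAMR3RegisterSum adds a
                   counter that is the sum of every sample matching the wildcard pattern, and
                   STAMR3RegisterPctOfSum one expressing a value as a percentage of such a sum,
                   so the individual DataTlb-Hits-* counters roll up into a total and a miss rate. */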
341 char szPat[128];
342 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Hits-*", idCpu);
343 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
344 "Data TLB hits total", "/IEM/CPU%u/DataTlb-Hits", idCpu);
345
346 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Safe*", idCpu);
347 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
348 "Data TLB actual misses", "/IEM/CPU%u/DataTlb-SafeTotal", idCpu);
349 char szVal[128];
350 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/DataTlb-SafeTotal", idCpu);
351 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Hits-*", idCpu);
352 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
353 "Data TLB actual miss rate", "/IEM/CPU%u/DataTlb-SafeRate", idCpu);
354
355# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
356 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
357 "Code TLB native misses on new page", "/IEM/CPU%u/CodeTlb-Misses-New-Page", idCpu);
358 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
359 "Code TLB native misses on new page w/ offset", "/IEM/CPU%u/CodeTlb-Misses-New-Page-With-Offset", idCpu);
360 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
361 "Code TLB native hits on new page", "/IEM/CPU%u/CodeTlb-Hits-New-Page", idCpu);
362 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
363 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/CodeTlb-Hits-New-Page-With-Offset", idCpu);
364# endif
365# endif
366
367#ifdef VBOX_WITH_IEM_RECOMPILER
368 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
369 "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
370 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
371 "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
372 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
373 "Times TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecBreaks", idCpu);
374
375 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
376 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
377 "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
378 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
379 "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
380# ifdef VBOX_WITH_STATISTICS
381 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
382 "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
383# endif
384 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
385 "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/TbPruningNative", idCpu);
386 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
387 "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
388 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
389 "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
390 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
391 "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
392 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
393 "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
394 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
395 "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
396 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
397 "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
398 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
399 "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);
400
401 PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
402 STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
403 "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);
404
405 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
406 "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
407 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
408 "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
409 STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
410 "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
411# ifdef VBOX_WITH_STATISTICS
412 STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
413 "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
414# endif
415
416 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
417 "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
418 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
419 "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
420
421 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
422 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
423 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
424 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
425 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
426 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
427 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
428 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
429
430 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
431 "Number of threaded calls per TB that have been properly recompiled to native code",
432 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
433 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
434 "Number of threaded calls per TB that could not be recompiler to native code",
435 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
436 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
437 "Number of threaded calls that could not be recompiler to native code",
438 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
439
440 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
441 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
442 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
443 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
444
445# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
446# ifdef VBOX_WITH_STATISTICS
447 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
448 "Number of calls to iemNativeRegAllocFindFree.",
449 "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
450# endif
451 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
452 "Number of times iemNativeRegAllocFindFree needed to free a variable.",
453 "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
454# ifdef VBOX_WITH_STATISTICS
455 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
456 "Number of times iemNativeRegAllocFindFree did not needed to free any variables.",
457 "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
458 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
459 "Times liveness info freeed up shadowed guest registers in iemNativeRegAllocFindFree.",
460 "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
461 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
462 "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
463 "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
464
465 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkippedArithmetic, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
466 "Skipped all status flag updating, arithmetic instructions",
467 "/IEM/CPU%u/re/NativeEFlagsSkippedArithmetic", idCpu);
468 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkippedLogical, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
469 "Skipped all status flag updating, logical instructions",
470 "/IEM/CPU%u/re/NativeEFlagsSkippedLogical", idCpu);
471
472 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippable", idCpu);
473 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfSkippable", idCpu);
474 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfSkippable", idCpu);
475 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfSkippable", idCpu);
476 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfSkippable", idCpu);
477 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfSkippable", idCpu);
478
479 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfRequired", idCpu);
480 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfRequired", idCpu);
481 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfRequired", idCpu);
482 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfRequired", idCpu);
483 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfRequired", idCpu);
484 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfRequired", idCpu);
485
486# ifdef IEMLIVENESS_EXTENDED_LAYOUT
487 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfDelayable", idCpu);
488 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfDelayable", idCpu);
489 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfDelayable", idCpu);
490 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfDelayable", idCpu);
491 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfDelayable", idCpu);
492 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfDelayable", idCpu);
493# endif
494
495 /* Sum up all status bits ('_' is a sorting hack). */
496 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fSkippable*", idCpu);
497 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total skippable EFLAGS status bit updating",
498 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippable", idCpu);
499
500 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fRequired*", idCpu);
501 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total required EFLAGS status bit updating",
502 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusRequired", idCpu);
503
504# ifdef IEMLIVENESS_EXTENDED_LAYOUT
505 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fDelayable*", idCpu);
506 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total potentially delayable EFLAGS status bit updating",
507 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayable", idCpu);
508# endif
509
510 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?f*", idCpu);
511 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total EFLAGS status bit events of any kind",
512 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusTotal", idCpu);
513
514 /* Ratio of the status bit skippables. */
515 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusTotal", idCpu);
516 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippable", idCpu);
517 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
518 "Total skippable EFLAGS status bit updating percentage",
519 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippablePct", idCpu);
520
521# ifdef IEMLIVENESS_EXTENDED_LAYOUT
522 /* Ratio of the status bit delayables. */
523 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayable", idCpu);
524 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
525 "Total potentially delayable EFLAGS status bit updating percentage",
526 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayablePct", idCpu);
527# endif
528
529 /* Ratios of individual bits. */
530 size_t const offFlagChar = RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlagsCf*", idCpu) - 3;
531 Assert(szPat[offFlagChar] == 'C');
532 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippable", idCpu);
533 Assert(szVal[offFlagChar] == 'C');
534 szPat[offFlagChar] = szVal[offFlagChar] = 'C'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.CF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippablePct", idCpu);
535 szPat[offFlagChar] = szVal[offFlagChar] = 'P'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.PF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsPfSkippablePct", idCpu);
536 szPat[offFlagChar] = szVal[offFlagChar] = 'A'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.AF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsAfSkippablePct", idCpu);
537 szPat[offFlagChar] = szVal[offFlagChar] = 'Z'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.ZF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsZfSkippablePct", idCpu);
538 szPat[offFlagChar] = szVal[offFlagChar] = 'S'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.SF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsSfSkippablePct", idCpu);
539 szPat[offFlagChar] = szVal[offFlagChar] = 'O'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.OF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsOfSkippablePct", idCpu);
540
541 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates", "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
542 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
543
544#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
545 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
546 "Number of calls to iemNativeSimdRegAllocFindFree.",
547 "/IEM/CPU%u/re/NativeSimdRegFindFree", idCpu);
548 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
549 "Number of times iemNativeSimdRegAllocFindFree needed to free a variable.",
550 "/IEM/CPU%u/re/NativeSimdRegFindFreeVar", idCpu);
551 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
552 "Number of times iemNativeSimdRegAllocFindFree did not needed to free any variables.",
553 "/IEM/CPU%u/re/NativeSimdRegFindFreeNoVar", idCpu);
554 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
555 "Times liveness info freeed up shadowed guest registers in iemNativeSimdRegAllocFindFree.",
556 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessUnshadowed", idCpu);
557 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
558 "Times liveness info helped finding the return register in iemNativeSimdRegAllocFindFree.",
559 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessHelped", idCpu);
560
561 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks", "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckPotential", idCpu);
562 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks", "/IEM/CPU%u/re/NativeMaybeSseXcptCheckPotential", idCpu);
563 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks", "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckPotential", idCpu);
564
565 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted", "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckOmitted", idCpu);
566 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted", "/IEM/CPU%u/re/NativeMaybeSseXcptCheckOmitted", idCpu);
567 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted", "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckOmitted", idCpu);
568#endif
569
570 /* Ratio of delayed RIP updates. */
571 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
572 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
573 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
574 "Delayed RIP updating percentage",
575 "/IEM/CPU%u/re/NativePcUpdateDelayed_StatusDelayedPct", idCpu);
576
577# endif /* VBOX_WITH_STATISTICS */
578# endif /* VBOX_WITH_IEM_NATIVE_RECOMPILER */
579
580#endif /* VBOX_WITH_IEM_RECOMPILER */
581
582 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
583 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
584 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
585 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
586 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
587 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
588
589# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
590 /* Instruction statistics: */
591# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
592 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
593 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
594 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
595 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
596# include "IEMInstructionStatisticsTmpl.h"
597# undef IEM_DO_INSTR_STAT
598# endif
599
600# if defined(VBOX_WITH_STATISTICS) && defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
601 /* Threaded function statistics: */
602 for (unsigned i = 1; i < (unsigned)kIemThreadedFunc_End; i++)
603 STAMR3RegisterF(pVM, &pVCpu->iem.s.acThreadedFuncStats[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED,
604 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/ThrdFuncs/%s", idCpu, g_apszIemThreadedFunctionStats[i]);
605# endif
606
607#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
608 }
609
610#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
611 /*
612 * Register the per-VM VMX APIC-access page handler type.
613 */
614 if (pVM->cpum.ro.GuestFeatures.fVmx)
615 {
616 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
617 iemVmxApicAccessPageHandler,
618 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
619 AssertLogRelRCReturn(rc, rc);
620 }
621#endif
622
623 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
624 DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM instruction TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
625#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
626 DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
627#endif
628#ifdef VBOX_WITH_DEBUGGER
629 iemR3RegisterDebuggerCommands();
630#endif
631
632 return VINF_SUCCESS;
633}
634
635
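/**
 * Terminates the interpreted execution manager.
 *
 * Nothing to clean up at present.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */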
636VMMR3DECL(int) IEMR3Term(PVM pVM)
637{
638 NOREF(pVM);
639 return VINF_SUCCESS;
640}
641
642
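/**
 * Relocation notification - nothing for IEM to update.
 *
 * @param   pVM     The cross context VM structure.
 */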
643VMMR3DECL(void) IEMR3Relocate(PVM pVM)
644{
645 RT_NOREF(pVM);
646}
647
648
649/**
650 * Gets the name of a generic IEM exit code.
651 *
652 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
653 * @param uExit The IEM exit to name.
654 */
655VMMR3DECL(const char *) IEMR3GetExitName(uint32_t uExit)
656{
657 static const char * const s_apszNames[] =
658 {
659 /* external interrupts */
660 "ExtInt 00h", "ExtInt 01h", "ExtInt 02h", "ExtInt 03h", "ExtInt 04h", "ExtInt 05h", "ExtInt 06h", "ExtInt 07h",
661 "ExtInt 08h", "ExtInt 09h", "ExtInt 0ah", "ExtInt 0bh", "ExtInt 0ch", "ExtInt 0dh", "ExtInt 0eh", "ExtInt 0fh",
662 "ExtInt 10h", "ExtInt 11h", "ExtInt 12h", "ExtInt 13h", "ExtInt 14h", "ExtInt 15h", "ExtInt 16h", "ExtInt 17h",
663 "ExtInt 18h", "ExtInt 19h", "ExtInt 1ah", "ExtInt 1bh", "ExtInt 1ch", "ExtInt 1dh", "ExtInt 1eh", "ExtInt 1fh",
664 "ExtInt 20h", "ExtInt 21h", "ExtInt 22h", "ExtInt 23h", "ExtInt 24h", "ExtInt 25h", "ExtInt 26h", "ExtInt 27h",
665 "ExtInt 28h", "ExtInt 29h", "ExtInt 2ah", "ExtInt 2bh", "ExtInt 2ch", "ExtInt 2dh", "ExtInt 2eh", "ExtInt 2fh",
666 "ExtInt 30h", "ExtInt 31h", "ExtInt 32h", "ExtInt 33h", "ExtInt 34h", "ExtInt 35h", "ExtInt 36h", "ExtInt 37h",
667 "ExtInt 38h", "ExtInt 39h", "ExtInt 3ah", "ExtInt 3bh", "ExtInt 3ch", "ExtInt 3dh", "ExtInt 3eh", "ExtInt 3fh",
668 "ExtInt 40h", "ExtInt 41h", "ExtInt 42h", "ExtInt 43h", "ExtInt 44h", "ExtInt 45h", "ExtInt 46h", "ExtInt 47h",
669 "ExtInt 48h", "ExtInt 49h", "ExtInt 4ah", "ExtInt 4bh", "ExtInt 4ch", "ExtInt 4dh", "ExtInt 4eh", "ExtInt 4fh",
670 "ExtInt 50h", "ExtInt 51h", "ExtInt 52h", "ExtInt 53h", "ExtInt 54h", "ExtInt 55h", "ExtInt 56h", "ExtInt 57h",
671 "ExtInt 58h", "ExtInt 59h", "ExtInt 5ah", "ExtInt 5bh", "ExtInt 5ch", "ExtInt 5dh", "ExtInt 5eh", "ExtInt 5fh",
672 "ExtInt 60h", "ExtInt 61h", "ExtInt 62h", "ExtInt 63h", "ExtInt 64h", "ExtInt 65h", "ExtInt 66h", "ExtInt 67h",
673 "ExtInt 68h", "ExtInt 69h", "ExtInt 6ah", "ExtInt 6bh", "ExtInt 6ch", "ExtInt 6dh", "ExtInt 6eh", "ExtInt 6fh",
674 "ExtInt 70h", "ExtInt 71h", "ExtInt 72h", "ExtInt 73h", "ExtInt 74h", "ExtInt 75h", "ExtInt 76h", "ExtInt 77h",
675 "ExtInt 78h", "ExtInt 79h", "ExtInt 7ah", "ExtInt 7bh", "ExtInt 7ch", "ExtInt 7dh", "ExtInt 7eh", "ExtInt 7fh",
676 "ExtInt 80h", "ExtInt 81h", "ExtInt 82h", "ExtInt 83h", "ExtInt 84h", "ExtInt 85h", "ExtInt 86h", "ExtInt 87h",
677 "ExtInt 88h", "ExtInt 89h", "ExtInt 8ah", "ExtInt 8bh", "ExtInt 8ch", "ExtInt 8dh", "ExtInt 8eh", "ExtInt 8fh",
678 "ExtInt 90h", "ExtInt 91h", "ExtInt 92h", "ExtInt 93h", "ExtInt 94h", "ExtInt 95h", "ExtInt 96h", "ExtInt 97h",
679 "ExtInt 98h", "ExtInt 99h", "ExtInt 9ah", "ExtInt 9bh", "ExtInt 9ch", "ExtInt 9dh", "ExtInt 9eh", "ExtInt 9fh",
680 "ExtInt a0h", "ExtInt a1h", "ExtInt a2h", "ExtInt a3h", "ExtInt a4h", "ExtInt a5h", "ExtInt a6h", "ExtInt a7h",
681 "ExtInt a8h", "ExtInt a9h", "ExtInt aah", "ExtInt abh", "ExtInt ach", "ExtInt adh", "ExtInt aeh", "ExtInt afh",
682 "ExtInt b0h", "ExtInt b1h", "ExtInt b2h", "ExtInt b3h", "ExtInt b4h", "ExtInt b5h", "ExtInt b6h", "ExtInt b7h",
683 "ExtInt b8h", "ExtInt b9h", "ExtInt bah", "ExtInt bbh", "ExtInt bch", "ExtInt bdh", "ExtInt beh", "ExtInt bfh",
684 "ExtInt c0h", "ExtInt c1h", "ExtInt c2h", "ExtInt c3h", "ExtInt c4h", "ExtInt c5h", "ExtInt c6h", "ExtInt c7h",
685 "ExtInt c8h", "ExtInt c9h", "ExtInt cah", "ExtInt cbh", "ExtInt cch", "ExtInt cdh", "ExtInt ceh", "ExtInt cfh",
686 "ExtInt d0h", "ExtInt d1h", "ExtInt d2h", "ExtInt d3h", "ExtInt d4h", "ExtInt d5h", "ExtInt d6h", "ExtInt d7h",
687 "ExtInt d8h", "ExtInt d9h", "ExtInt dah", "ExtInt dbh", "ExtInt dch", "ExtInt ddh", "ExtInt deh", "ExtInt dfh",
688 "ExtInt e0h", "ExtInt e1h", "ExtInt e2h", "ExtInt e3h", "ExtInt e4h", "ExtInt e5h", "ExtInt e6h", "ExtInt e7h",
689 "ExtInt e8h", "ExtInt e9h", "ExtInt eah", "ExtInt ebh", "ExtInt ech", "ExtInt edh", "ExtInt eeh", "ExtInt efh",
690 "ExtInt f0h", "ExtInt f1h", "ExtInt f2h", "ExtInt f3h", "ExtInt f4h", "ExtInt f5h", "ExtInt f6h", "ExtInt f7h",
691 "ExtInt f8h", "ExtInt f9h", "ExtInt fah", "ExtInt fbh", "ExtInt fch", "ExtInt fdh", "ExtInt feh", "ExtInt ffh",
692 /* software interrupts */
693 "SoftInt 00h", "SoftInt 01h", "SoftInt 02h", "SoftInt 03h", "SoftInt 04h", "SoftInt 05h", "SoftInt 06h", "SoftInt 07h",
694 "SoftInt 08h", "SoftInt 09h", "SoftInt 0ah", "SoftInt 0bh", "SoftInt 0ch", "SoftInt 0dh", "SoftInt 0eh", "SoftInt 0fh",
695 "SoftInt 10h", "SoftInt 11h", "SoftInt 12h", "SoftInt 13h", "SoftInt 14h", "SoftInt 15h", "SoftInt 16h", "SoftInt 17h",
696 "SoftInt 18h", "SoftInt 19h", "SoftInt 1ah", "SoftInt 1bh", "SoftInt 1ch", "SoftInt 1dh", "SoftInt 1eh", "SoftInt 1fh",
697 "SoftInt 20h", "SoftInt 21h", "SoftInt 22h", "SoftInt 23h", "SoftInt 24h", "SoftInt 25h", "SoftInt 26h", "SoftInt 27h",
698 "SoftInt 28h", "SoftInt 29h", "SoftInt 2ah", "SoftInt 2bh", "SoftInt 2ch", "SoftInt 2dh", "SoftInt 2eh", "SoftInt 2fh",
699 "SoftInt 30h", "SoftInt 31h", "SoftInt 32h", "SoftInt 33h", "SoftInt 34h", "SoftInt 35h", "SoftInt 36h", "SoftInt 37h",
700 "SoftInt 38h", "SoftInt 39h", "SoftInt 3ah", "SoftInt 3bh", "SoftInt 3ch", "SoftInt 3dh", "SoftInt 3eh", "SoftInt 3fh",
701 "SoftInt 40h", "SoftInt 41h", "SoftInt 42h", "SoftInt 43h", "SoftInt 44h", "SoftInt 45h", "SoftInt 46h", "SoftInt 47h",
702 "SoftInt 48h", "SoftInt 49h", "SoftInt 4ah", "SoftInt 4bh", "SoftInt 4ch", "SoftInt 4dh", "SoftInt 4eh", "SoftInt 4fh",
703 "SoftInt 50h", "SoftInt 51h", "SoftInt 52h", "SoftInt 53h", "SoftInt 54h", "SoftInt 55h", "SoftInt 56h", "SoftInt 57h",
704 "SoftInt 58h", "SoftInt 59h", "SoftInt 5ah", "SoftInt 5bh", "SoftInt 5ch", "SoftInt 5dh", "SoftInt 5eh", "SoftInt 5fh",
705 "SoftInt 60h", "SoftInt 61h", "SoftInt 62h", "SoftInt 63h", "SoftInt 64h", "SoftInt 65h", "SoftInt 66h", "SoftInt 67h",
706 "SoftInt 68h", "SoftInt 69h", "SoftInt 6ah", "SoftInt 6bh", "SoftInt 6ch", "SoftInt 6dh", "SoftInt 6eh", "SoftInt 6fh",
707 "SoftInt 70h", "SoftInt 71h", "SoftInt 72h", "SoftInt 73h", "SoftInt 74h", "SoftInt 75h", "SoftInt 76h", "SoftInt 77h",
708 "SoftInt 78h", "SoftInt 79h", "SoftInt 7ah", "SoftInt 7bh", "SoftInt 7ch", "SoftInt 7dh", "SoftInt 7eh", "SoftInt 7fh",
709 "SoftInt 80h", "SoftInt 81h", "SoftInt 82h", "SoftInt 83h", "SoftInt 84h", "SoftInt 85h", "SoftInt 86h", "SoftInt 87h",
710 "SoftInt 88h", "SoftInt 89h", "SoftInt 8ah", "SoftInt 8bh", "SoftInt 8ch", "SoftInt 8dh", "SoftInt 8eh", "SoftInt 8fh",
711 "SoftInt 90h", "SoftInt 91h", "SoftInt 92h", "SoftInt 93h", "SoftInt 94h", "SoftInt 95h", "SoftInt 96h", "SoftInt 97h",
712 "SoftInt 98h", "SoftInt 99h", "SoftInt 9ah", "SoftInt 9bh", "SoftInt 9ch", "SoftInt 9dh", "SoftInt 9eh", "SoftInt 9fh",
713 "SoftInt a0h", "SoftInt a1h", "SoftInt a2h", "SoftInt a3h", "SoftInt a4h", "SoftInt a5h", "SoftInt a6h", "SoftInt a7h",
714 "SoftInt a8h", "SoftInt a9h", "SoftInt aah", "SoftInt abh", "SoftInt ach", "SoftInt adh", "SoftInt aeh", "SoftInt afh",
715 "SoftInt b0h", "SoftInt b1h", "SoftInt b2h", "SoftInt b3h", "SoftInt b4h", "SoftInt b5h", "SoftInt b6h", "SoftInt b7h",
716 "SoftInt b8h", "SoftInt b9h", "SoftInt bah", "SoftInt bbh", "SoftInt bch", "SoftInt bdh", "SoftInt beh", "SoftInt bfh",
717 "SoftInt c0h", "SoftInt c1h", "SoftInt c2h", "SoftInt c3h", "SoftInt c4h", "SoftInt c5h", "SoftInt c6h", "SoftInt c7h",
718 "SoftInt c8h", "SoftInt c9h", "SoftInt cah", "SoftInt cbh", "SoftInt cch", "SoftInt cdh", "SoftInt ceh", "SoftInt cfh",
719 "SoftInt d0h", "SoftInt d1h", "SoftInt d2h", "SoftInt d3h", "SoftInt d4h", "SoftInt d5h", "SoftInt d6h", "SoftInt d7h",
720 "SoftInt d8h", "SoftInt d9h", "SoftInt dah", "SoftInt dbh", "SoftInt dch", "SoftInt ddh", "SoftInt deh", "SoftInt dfh",
721 "SoftInt e0h", "SoftInt e1h", "SoftInt e2h", "SoftInt e3h", "SoftInt e4h", "SoftInt e5h", "SoftInt e6h", "SoftInt e7h",
722 "SoftInt e8h", "SoftInt e9h", "SoftInt eah", "SoftInt ebh", "SoftInt ech", "SoftInt edh", "SoftInt eeh", "SoftInt efh",
723 "SoftInt f0h", "SoftInt f1h", "SoftInt f2h", "SoftInt f3h", "SoftInt f4h", "SoftInt f5h", "SoftInt f6h", "SoftInt f7h",
724 "SoftInt f8h", "SoftInt f9h", "SoftInt fah", "SoftInt fbh", "SoftInt fch", "SoftInt fdh", "SoftInt feh", "SoftInt ffh",
725 };
726 if (uExit < RT_ELEMENTS(s_apszNames))
727 return s_apszNames[uExit];
728 return NULL;
729}
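/* Example: IEMR3GetExitName(0x2f) returns "ExtInt 2fh" and IEMR3GetExitName(0x12f)
   returns "SoftInt 2fh", while anything from 0x200 up falls outside the table and
   yields NULL. */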
730
731
732/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
733static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
734{
735 if (*pfHeader)
736 return;
737 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
738 *pfHeader = true;
739}
740
741
742/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
743static void iemR3InfoTlbPrintSlot(PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe, uint32_t uSlot)
744{
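    /* Legend for the flag letters printed below: NX/X = execute disallowed/allowed,
       RO/RW = page table write protection, A = accessed, D = dirty, w/r = physical
       write/read access allowed, U = backing page unassigned, S/M = no ring-3
       mapping vs. mapped, followed by the physical revision state. */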
745 pHlp->pfnPrintf(pHlp, "%02x: %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s/%s%s%s/%s %s\n",
746 uSlot,
747 (pTlbe->uTag & IEMTLB_REVISION_MASK) == pTlb->uTlbRevision ? "valid "
748 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
749 : "expired",
750 (pTlbe->uTag & ~IEMTLB_REVISION_MASK) << X86_PAGE_SHIFT,
751 pTlbe->GCPhys, pTlbe->pbMappingR3,
752 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
753 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "NX" : " X",
754 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "RO" : "RW",
755 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
756 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
757 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
758 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
759 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "U" : "-",
760 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "S" : "M",
761 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
762 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired");
763}
764
765
766/** Displays one or more TLB slots. */
767static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
768 uint32_t uSlot, uint32_t cSlots, bool *pfHeader)
769{
770 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
771 {
772 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
773 {
774 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
775 cSlots, RT_ELEMENTS(pTlb->aEntries));
776 cSlots = RT_ELEMENTS(pTlb->aEntries);
777 }
778
779 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
780 while (cSlots-- > 0)
781 {
782 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
783 iemR3InfoTlbPrintSlot(pHlp, pTlb, &Tlbe, uSlot);
784 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
785 }
786 }
787 else
788 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
789 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
790}
791
792
793/** Displays the TLB slot for the given address. */
794static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
795 uint64_t uAddress, bool *pfHeader)
796{
797 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
798
799 uint64_t const uTag = (uAddress << 16) >> (X86_PAGE_SHIFT + 16);
800 uint32_t const uSlot = (uint8_t)uTag;
801 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
802 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
803 Tlbe.uTag == (uTag | pTlb->uTlbRevision) ? "match"
804 : (Tlbe.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
805 iemR3InfoTlbPrintSlot(pHlp, pTlb, &Tlbe, uSlot);
806}
807
808
809/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
810static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
811{
812 /*
813 * This is entirely argument driven.
814 */
815 static RTGETOPTDEF const s_aOptions[] =
816 {
817 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
818 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
819 { "all", 'A', RTGETOPT_REQ_NOTHING },
820 { "--all", 'A', RTGETOPT_REQ_NOTHING },
821 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
822 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
823 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
824 };
825
826 char szDefault[] = "-A";
827 char *papszDefaults[2] = { szDefault, NULL };
828 if (cArgs == 0)
829 {
830 cArgs = 1;
831 papszArgs = papszDefaults;
832 }
833
834 RTGETOPTSTATE State;
835 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
836 AssertRCReturnVoid(rc);
837
838 bool fNeedHeader = true;
839 bool fAddressMode = true;
840 PVMCPU pVCpu = VMMGetCpu(pVM);
841 if (!pVCpu)
842 pVCpu = VMMGetCpuById(pVM, 0);
843
844 RTGETOPTUNION ValueUnion;
845 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
846 {
847 switch (rc)
848 {
849 case 'c':
850 if (ValueUnion.u32 >= pVM->cCpus)
851 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
852 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
853 {
854 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
855 fNeedHeader = true;
856 }
857 break;
858
859 case 'a':
860 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
861 ValueUnion.u64, &fNeedHeader);
862 fAddressMode = true;
863 break;
864
865 case 'A':
866 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
867 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), &fNeedHeader);
868 break;
869
870 case 'r':
871 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
872 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, &fNeedHeader);
873 fAddressMode = false;
874 break;
875
876 case 's':
877 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
878 ValueUnion.u32, 1, &fNeedHeader);
879 fAddressMode = false;
880 break;
881
882 case VINF_GETOPT_NOT_OPTION:
883 if (fAddressMode)
884 {
885 uint64_t uAddr;
886 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
887 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
888 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
889 uAddr, &fNeedHeader);
890 else
891 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
892 }
893 else
894 {
895 uint32_t uSlot;
896 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
897 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
898 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
899 uSlot, 1, &fNeedHeader);
900 else
901 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
902 }
903 break;
904
905 case 'h':
906 pHlp->pfnPrintf(pHlp,
907 "Usage: info %ctlb [options]\n"
908 "\n"
909 "Options:\n"
910 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
911 " Selects the CPU which TLBs we're looking at. Default: Caller / 0\n"
912 " -A, --all, all\n"
913 " Display all the TLB entries (default if no other args).\n"
914 " -a<virt>, --address=<virt>\n"
915 " Shows the TLB entry for the specified guest virtual address.\n"
916 " -r<slot:count>, --range=<slot:count>\n"
917 " Shows the TLB entries for the specified slot range.\n"
918                                "  -s<slot>, --slot=<slot>\n"
919 " Shows the given TLB slot.\n"
920 "\n"
921 "Non-options are interpreted according to the last -a, -r or -s option,\n"
922                                "defaulting to addresses if not preceded by any of those options.\n"
923 , fITlb ? 'i' : 'd');
924 return;
925
926 default:
927 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
928 return;
929 }
930 }
931}
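/*
 * Usage sketch for the handler above (placeholder values, not taken from a real run):
 *      info itlb                       - dump all ITLB slots of the calling/first vCPU
 *      info dtlb -c 1 -s 1f            - show DTLB slot 0x1f on vCPU 1
 *      info dtlb -a <guest-virt-addr>  - show the DTLB entry for a guest virtual address
 */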
932
933
934/**
935 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
936 */
937static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
938{
939 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
940}
941
942
943/**
944 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
945 */
946static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
947{
948 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
949}
950
951#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
952/**
953 * @callback_method_impl{FNDBGFINFOARGVINT, tb}
954 */
955static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
956{
957 /*
958 * Parse arguments.
959 */
960 static RTGETOPTDEF const s_aOptions[] =
961 {
962 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
963 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
964 { "--addr", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
965 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
966 { "--phys", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
967 { "--physical", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
968 { "--phys-addr", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
969 { "--phys-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
970 { "--physical-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
971 { "--flags", 'f', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
972 };
973
974 RTGETOPTSTATE State;
975 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
976 AssertRCReturnVoid(rc);
977
978 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
979 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
980 RTGCPHYS GCPhysPc = NIL_RTGCPHYS;
981    RTGCPTR         GCVirt    = NIL_RTGCPTR;
982 uint32_t fFlags = UINT32_MAX;
983
984 RTGETOPTUNION ValueUnion;
985 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
986 {
987 switch (rc)
988 {
989 case 'c':
990 if (ValueUnion.u32 >= pVM->cCpus)
991 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
992 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
993 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
994 break;
995
996 case 'a':
997 GCVirt = ValueUnion.u64;
998 GCPhysPc = NIL_RTGCPHYS;
999 break;
1000
1001 case 'p':
1002                GCVirt   = NIL_RTGCPTR;
1003 GCPhysPc = ValueUnion.u64;
1004 break;
1005
1006 case 'f':
1007 fFlags = ValueUnion.u32;
1008 break;
1009
1010 case 'h':
1011 pHlp->pfnPrintf(pHlp,
1012                                "Usage: info tb [options]\n"
1013 "\n"
1014 "Options:\n"
1015 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1016                                "    Selects the CPU whose TBs we're looking at. Default: Caller / 0\n"
1017 " -a<virt>, --address=<virt>\n"
1018 " Shows the TB for the specified guest virtual address.\n"
1019 " -p<phys>, --phys=<phys>, --phys-addr=<phys>\n"
1020 " Shows the TB for the specified guest physical address.\n"
1021                                "  -f<flags>, --flags=<flags>\n"
1022 " The TB flags value (hex) to use when looking up the TB.\n"
1023 "\n"
1024 "The default is to use CS:RIP and derive flags from the CPU mode.\n");
1025 return;
1026
1027 default:
1028 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1029 return;
1030 }
1031 }
1032
1033    /* Currently we only do the work when running on the target EMT. */
1034 if (pVCpu != pVCpuThis)
1035 {
1036 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
1037 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
1038 return;
1039 }
1040
1041 /*
1042 * Defaults.
1043 */
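    /* If no physical PC was given, take the guest's flat PC (CS:RIP) and translate it
       through PGM; if no flags were given, derive them below the same way the threaded
       recompiler does. */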
1044 if (GCPhysPc == NIL_RTGCPHYS)
1045 {
1046 if (GCVirt == NIL_RTGCPTR)
1047 GCVirt = CPUMGetGuestFlatPC(pVCpu);
1048 rc = PGMPhysGCPtr2GCPhys(pVCpu, GCVirt, &GCPhysPc);
1049 if (RT_FAILURE(rc))
1050 {
1051            pHlp->pfnPrintf(pHlp, "Failed to convert %%%RGv to a guest physical address: %Rrc\n", GCVirt, rc);
1052 return;
1053 }
1054 }
1055 if (fFlags == UINT32_MAX)
1056 {
1057 /* Note! This is duplicating code in IEMAllThrdRecompiler. */
1058 fFlags = iemCalcExecFlags(pVCpu);
1059 if (pVM->cCpus == 1)
1060 fFlags |= IEM_F_X86_DISREGARD_LOCK;
1061 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
1062 fFlags |= IEMTB_F_INHIBIT_SHADOW;
1063 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1064 fFlags |= IEMTB_F_INHIBIT_NMI;
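        /* Outside 64-bit mode: if less than a page (adjusted for the page offset of the
           CS base) plus 16 bytes of slack remains up to the CS limit, the TB needs CS
           limit checks. */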
1065 if ((IEM_F_MODE_CPUMODE_MASK & fFlags) != IEMMODE_64BIT)
1066 {
1067 int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
1068 if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
1069 fFlags |= IEMTB_F_CS_LIM_CHECKS;
1070 }
1071 }
1072
1073 /*
1074 * Do the lookup...
1075 *
1076 * Note! This is also duplicating code in IEMAllThrdRecompiler. We don't
1077 * have much choice since we don't want to increase use counters and
1078 * trigger native recompilation.
1079 */
1080 fFlags &= IEMTB_F_KEY_MASK;
1081 IEMTBCACHE const * const pTbCache = pVCpu->iem.s.pTbCacheR3;
1082 uint32_t const idxHash = IEMTBCACHE_HASH_NO_KEY_MASK(pTbCache, fFlags, GCPhysPc);
1083 PCIEMTB pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
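    /* Walk the hash bucket's collision chain, matching on the physical PC and the key
       part of the TB flags (CS attributes are not compared here, see the @todo in the
       loop). */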
1084 while (pTb)
1085 {
1086 if (pTb->GCPhysPc == GCPhysPc)
1087 {
1088 if ((pTb->fFlags & IEMTB_F_KEY_MASK) == fFlags)
1089 {
1090 /// @todo if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
1091 break;
1092 }
1093 }
1094 pTb = pTb->pNext;
1095 }
1096 if (!pTb)
1097 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x - no TB found on #%u\n", GCPhysPc, fFlags, pVCpu->idCpu);
1098 else
1099 {
1100 /*
1101 * Disassemble according to type.
1102 */
1103 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
1104 {
1105# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
1106 case IEMTB_F_TYPE_NATIVE:
1107 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - native\n", GCPhysPc, fFlags, pVCpu->idCpu, pTb);
1108 iemNativeDisassembleTb(pTb, pHlp);
1109 break;
1110# endif
1111
1112 case IEMTB_F_TYPE_THREADED:
1113 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - threaded\n", GCPhysPc, fFlags, pVCpu->idCpu, pTb);
1114 iemThreadedDisassembleTb(pTb, pHlp);
1115 break;
1116
1117 default:
1118 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - ??? %#x\n",
1119 GCPhysPc, fFlags, pVCpu->idCpu, pTb, pTb->fFlags);
1120 break;
1121 }
1122 }
1123}
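/*
 * Example invocations of the 'tb' info item above (placeholders only, flags in hex):
 *      info tb                             - TB for the current CS:RIP with derived flags
 *      info tb -a <guest-virt>             - TB for a guest virtual address
 *      info tb -p <guest-phys> -f <flags>  - TB for a guest physical address + explicit flags
 */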
1124#endif /* VBOX_WITH_IEM_RECOMPILER && !VBOX_VMM_TARGET_ARMV8 */
1125
1126
1127#ifdef VBOX_WITH_DEBUGGER
1128
1129/** @callback_method_impl{FNDBGCCMD,
1130 * Implements the '.iemflushtlb' command. }
1131 */
1132static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
1133{
1134 VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
1135 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
1136 if (pVCpu)
1137 {
1138 VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAll, 1, pVCpu);
1139 return VINF_SUCCESS;
1140 }
1141 RT_NOREF(paArgs, cArgs);
1142 return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
1143}
1144
1145
1146/**
1147 * Called by IEMR3Init to register debugger commands.
1148 */
1149static void iemR3RegisterDebuggerCommands(void)
1150{
1151 /*
1152 * Register debugger commands.
1153 */
1154 static DBGCCMD const s_aCmds[] =
1155 {
1156 {
1157 /* .pszCmd = */ "iemflushtlb",
1158 /* .cArgsMin = */ 0,
1159 /* .cArgsMax = */ 0,
1160 /* .paArgDescs = */ NULL,
1161 /* .cArgDescs = */ 0,
1162 /* .fFlags = */ 0,
1163 /* .pfnHandler = */ iemR3DbgFlushTlbs,
1164 /* .pszSyntax = */ "",
1165            /* .pszDescription = */ "Flushes the code and data TLBs"
1166 },
1167 };
1168
1169 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
1170 AssertLogRelRC(rc);
1171}
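/*
 * Once registered, the command can be run from the DBGC debugger console, e.g.:
 *      .iemflushtlb        ; flushes the code and data TLBs of the current CPU
 */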
1172
1173#endif /* VBOX_WITH_DEBUGGER */
1174