VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp@ 102850

Last change on this file since 102850 was 102850, checked in by vboxsync, 11 months ago

VMM/IEM: Implemented the first of two code TLB lookups. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 40.9 KB
Line 
1/* $Id: IEMR3.cpp 102850 2024-01-12 00:47:47Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#include <VBox/vmm/iem.h>
34#include <VBox/vmm/cpum.h>
35#include <VBox/vmm/dbgf.h>
36#include <VBox/vmm/mm.h>
37#if defined(VBOX_VMM_TARGET_ARMV8)
38# include "IEMInternal-armv8.h"
39#else
40# include "IEMInternal.h"
41#endif
42#include <VBox/vmm/vm.h>
43#include <VBox/vmm/vmapi.h>
44#include <VBox/err.h>
45#ifdef VBOX_WITH_DEBUGGER
46# include <VBox/dbg.h>
47#endif
48
49#include <iprt/assert.h>
50#include <iprt/getopt.h>
51#include <iprt/string.h>
52
53
54/*********************************************************************************************************************************
55* Internal Functions *
56*********************************************************************************************************************************/
57static FNDBGFINFOARGVINT iemR3InfoITlb;
58static FNDBGFINFOARGVINT iemR3InfoDTlb;
59#ifdef VBOX_WITH_DEBUGGER
60static void iemR3RegisterDebuggerCommands(void);
61#endif
62
63
64#if !defined(VBOX_VMM_TARGET_ARMV8)
65static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
66{
67 switch (enmTargetCpu)
68 {
69#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
70 CASE_RET_STR(IEMTARGETCPU_8086);
71 CASE_RET_STR(IEMTARGETCPU_V20);
72 CASE_RET_STR(IEMTARGETCPU_186);
73 CASE_RET_STR(IEMTARGETCPU_286);
74 CASE_RET_STR(IEMTARGETCPU_386);
75 CASE_RET_STR(IEMTARGETCPU_486);
76 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
77 CASE_RET_STR(IEMTARGETCPU_PPRO);
78 CASE_RET_STR(IEMTARGETCPU_CURRENT);
79#undef CASE_RET_STR
80 default: return "Unknown";
81 }
82}
83#endif
84
85
86/**
87 * Initializes the interpreted execution manager.
88 *
89 * This must be called after CPUM as we're quering information from CPUM about
90 * the guest and host CPUs.
91 *
92 * @returns VBox status code.
93 * @param pVM The cross context VM structure.
94 */
95VMMR3DECL(int) IEMR3Init(PVM pVM)
96{
97 /*
98 * Read configuration.
99 */
100#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
101 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
102 int rc;
103#endif
104
105#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
106 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
107 * Controls whether the custom VBox specific CPUID host call interface is
108 * enabled or not. */
109# ifdef DEBUG_bird
110 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
111# else
112 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
113# endif
114 AssertLogRelRCReturn(rc, rc);
115#endif
116
117#ifdef VBOX_WITH_IEM_RECOMPILER
118 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
119 * Max number of TBs per EMT. */
120 uint32_t cMaxTbs = 0;
121 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
122 AssertLogRelRCReturn(rc, rc);
123 if (cMaxTbs < _16K || cMaxTbs > _8M)
124 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
125 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
126
127 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32678}
128 * Initial (minimum) number of TBs per EMT in ring-3. */
129 uint32_t cInitialTbs = 0;
130 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
131 AssertLogRelRCReturn(rc, rc);
132 if (cInitialTbs < _16K || cInitialTbs > _8M)
133 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
134 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
135
136 /* Check that the two values makes sense together. Expect user/api to do
137 the right thing or get lost. */
138 if (cInitialTbs > cMaxTbs)
139 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
140 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
141 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
142
143 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
144 * Max executable memory for recompiled code per EMT. */
145 uint64_t cbMaxExec = 0;
146 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
147 AssertLogRelRCReturn(rc, rc);
148 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
149 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
150 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
151 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
152
153 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
154 * The executable memory allocator chunk size. */
155 uint32_t cbChunkExec = 0;
156 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
157 AssertLogRelRCReturn(rc, rc);
158 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
159 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
160 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
161 cbChunkExec, cbChunkExec, _1M, _256M);
162
163 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
164 * The initial executable memory allocator size (per EMT). The value is
165 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
166 uint64_t cbInitialExec = 0;
167 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
168 AssertLogRelRCReturn(rc, rc);
169 if (cbInitialExec > cbMaxExec)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
171 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
172 cbInitialExec, cbInitialExec, cbMaxExec);
173
174#endif /* VBOX_WITH_IEM_RECOMPILER*/
175
176 /*
177 * Initialize per-CPU data and register statistics.
178 */
179 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
180 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
181
182 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
183 {
184 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
185 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do it's job w/o instruction stats) */
186
187 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
188 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
189
190 /*
191 * Host and guest CPU information.
192 */
193 if (idCpu == 0)
194 {
195 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
196 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
197#if !defined(VBOX_VMM_TARGET_ARMV8)
198 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
199 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
200 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
201# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
202 if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
203 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
204 else
205# endif
206 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
207#else
208 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
209 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
210#endif
211
212#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
213 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
214 {
215 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
216 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
217 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
218 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
219 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
220 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
221 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
222 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
223 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
224 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
225 }
226 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
227 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
228 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
229#else
230 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
231 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
232 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
233#endif
234 }
235 else
236 {
237 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
238 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor;
239 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
240 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
241#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
242 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
243#endif
244 }
245
246 /*
247 * Mark all buffers free.
248 */
249 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
250 while (iMemMap-- > 0)
251 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
252 }
253
254
255#ifdef VBOX_WITH_IEM_RECOMPILER
256 /*
257 * Initialize the TB allocator and cache (/ hash table).
258 *
259 * This is done by each EMT to try get more optimal thread/numa locality of
260 * the allocations.
261 */
262 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
263 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
264 AssertLogRelRCReturn(rc, rc);
265#endif
266
267 /*
268 * Register statistics.
269 */
270 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
271 {
272#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
273 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
274
275 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
276 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
277 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
278 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
279 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
280 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
281 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
282 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
283 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
284 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
285 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
286 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
287 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
288 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
289 STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
290 "Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
291 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
292 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
293
294 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbMisses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
295 "Code TLB misses", "/IEM/CPU%u/CodeTlb-Misses", idCpu);
296 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
297 "Code TLB revision", "/IEM/CPU%u/CodeTlb-Revision", idCpu);
298 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
299 "Code TLB physical revision", "/IEM/CPU%u/CodeTlb-PhysRev", idCpu);
300 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
301 "Code TLB slow read path", "/IEM/CPU%u/CodeTlb-SlowReads", idCpu);
302
303 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbMisses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
304 "Data TLB misses", "/IEM/CPU%u/DataTlb-Misses", idCpu);
305 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
306 "Data TLB safe read path", "/IEM/CPU%u/DataTlb-SafeReads", idCpu);
307 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
308 "Data TLB safe write path", "/IEM/CPU%u/DataTlb-SafeWrites", idCpu);
309 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
310 "Data TLB revision", "/IEM/CPU%u/DataTlb-Revision", idCpu);
311 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
312 "Data TLB physical revision", "/IEM/CPU%u/DataTlb-PhysRev", idCpu);
313
314# ifdef VBOX_WITH_STATISTICS
315 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
316 "Code TLB hits", "/IEM/CPU%u/CodeTlb-Hits", idCpu);
317 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
318 "Data TLB hits", "/IEM/CPU%u/DataTlb-Hits-Other", idCpu);
319# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
320 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
321 "Data TLB native stack access hits", "/IEM/CPU%u/DataTlb-Hits-Native-Stack", idCpu);
322 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
323 "Data TLB native data fetch hits", "/IEM/CPU%u/DataTlb-Hits-Native-Fetch", idCpu);
324 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
325 "Data TLB native data store hits", "/IEM/CPU%u/DataTlb-Hits-Native-Store", idCpu);
326 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
327 "Data TLB native mapped data hits", "/IEM/CPU%u/DataTlb-Hits-Native-Mapped", idCpu);
328# endif
329 char szPat[128];
330 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Hits-*", idCpu);
331 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
332 "Data TLB hits total", "/IEM/CPU%u/DataTlb-Hits", idCpu);
333
334 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Safe*", idCpu);
335 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
336 "Data TLB actual misses", "/IEM/CPU%u/DataTlb-SafeTotal", idCpu);
337 char szValue[128];
338 RTStrPrintf(szValue, sizeof(szValue), "/IEM/CPU%u/DataTlb-SafeTotal", idCpu);
339 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Hits-*", idCpu);
340 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szValue, szPat,
341 "Data TLB actual miss rate", "/IEM/CPU%u/DataTlb-SafeRate", idCpu);
342
343# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
344 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
345 "Code TLB native misses on new page", "/IEM/CPU%u/CodeTlb-Misses-New-Page", idCpu);
346 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
347 "Code TLB native misses on new page w/ offset", "/IEM/CPU%u/CodeTlb-Misses-New-Page-With-Offset", idCpu);
348 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
349 "Code TLB native hits on new page", "/IEM/CPU%u/CodeTlb-Hits-New-Page", idCpu);
350 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
351 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/CodeTlb-Hits-New-Page-With-Offset", idCpu);
352# endif
353# endif
354
355#ifdef VBOX_WITH_IEM_RECOMPILER
356 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
357 "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
358 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
359 "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
360 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
361 "Times TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecBreaks", idCpu);
362
363 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
364 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
365 "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
366 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
367 "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
368# ifdef VBOX_WITH_STATISTICS
369 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
370 "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
371# endif
372 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
373 "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/TbPruningNative", idCpu);
374 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
375 "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
376 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
377 "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
378 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
379 "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
380 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
381 "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
382 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
383 "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
384 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
385 "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
386 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
387 "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);
388
389 PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
390 STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
391 "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);
392
393 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
394 "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
395 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
396 "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
397 STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
398 "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
399# ifdef VBOX_WITH_STATISTICS
400 STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
401 "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
402# endif
403
404 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
405 "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
406 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
407 "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
408
409 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
410 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
411 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
412 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
413 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
414 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
415 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
416 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
417
418 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
419 "Number of threaded calls per TB that have been properly recompiled to native code",
420 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
421 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
422 "Number of threaded calls per TB that could not be recompiler to native code",
423 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
424 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
425 "Number of threaded calls that could not be recompiler to native code",
426 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
427
428 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
429 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
430 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
431 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
432#endif /* VBOX_WITH_IEM_RECOMPILER */
433
434 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
435 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
436 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
437 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
438 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
439 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
440
441# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
442 /* Instruction statistics: */
443# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
444 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
445 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
446 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
447 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
448# include "IEMInstructionStatisticsTmpl.h"
449# undef IEM_DO_INSTR_STAT
450# endif
451
452#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
453 }
454
455#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
456 /*
457 * Register the per-VM VMX APIC-access page handler type.
458 */
459 if (pVM->cpum.ro.GuestFeatures.fVmx)
460 {
461 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
462 iemVmxApicAccessPageHandler,
463 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
464 AssertLogRelRCReturn(rc, rc);
465 }
466#endif
467
468 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
469 DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM instruction TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
470#ifdef VBOX_WITH_DEBUGGER
471 iemR3RegisterDebuggerCommands();
472#endif
473
474 return VINF_SUCCESS;
475}
476
477
478VMMR3DECL(int) IEMR3Term(PVM pVM)
479{
480 NOREF(pVM);
481 return VINF_SUCCESS;
482}
483
484
485VMMR3DECL(void) IEMR3Relocate(PVM pVM)
486{
487 RT_NOREF(pVM);
488}
489
490
491/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
492static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
493{
494 if (*pfHeader)
495 return;
496 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
497 *pfHeader = true;
498}
499
500
/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress.
 *
 * Prints a single TLB entry on one line: the slot number, the entry's state
 * relative to the TLB's current revision (valid/empty/expired), the page
 * address reconstructed from the tag, the guest physical address, the ring-3
 * mapping pointer, the raw flag bits, a decoded flag summary, and finally the
 * entry's state relative to the TLB's physical revision.
 *
 * @param   pHlp    The info helper callbacks (output sink).
 * @param   pTlb    The TLB the entry belongs to (supplies current revisions).
 * @param   pTlbe   The TLB entry to print.
 * @param   uSlot   The slot number, for display only.
 */
static void iemR3InfoTlbPrintSlot(PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe, uint32_t uSlot)
{
    pHlp->pfnPrintf(pHlp, "%02x: %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s/%s%s%s/%s %s\n",
                    uSlot,
                    /* An entry is only valid while its tag's revision bits match the TLB's. */
                    (pTlbe->uTag & IEMTLB_REVISION_MASK) == pTlb->uTlbRevision ? "valid "
                    : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
                    : "expired",
                    /* Shift the tag bits back up into a page address for display. */
                    (pTlbe->uTag & ~IEMTLB_REVISION_MASK) << X86_PAGE_SHIFT,
                    pTlbe->GCPhys, pTlbe->pbMappingR3,
                    (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
                    /* Page-table level flags: execute, write, accessed, dirty. */
                    pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "NX" : " X",
                    pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "RO" : "RW",
                    pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
                    pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
                    /* Physical page flags: writable, readable, unassigned. */
                    pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
                    pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
                    pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "U" : "-",
                    /* 'M' when a direct ring-3 mapping exists, 'S' when access must go the slow way. */
                    pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "S" : "M",
                    (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
                    : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired");
}
523
524
525/** Displays one or more TLB slots. */
526static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
527 uint32_t uSlot, uint32_t cSlots, bool *pfHeader)
528{
529 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
530 {
531 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
532 {
533 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
534 cSlots, RT_ELEMENTS(pTlb->aEntries));
535 cSlots = RT_ELEMENTS(pTlb->aEntries);
536 }
537
538 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
539 while (cSlots-- > 0)
540 {
541 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
542 iemR3InfoTlbPrintSlot(pHlp, pTlb, &Tlbe, uSlot);
543 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
544 }
545 }
546 else
547 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
548 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
549}
550
551
552/** Displays the TLB slot for the given address. */
553static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
554 uint64_t uAddress, bool *pfHeader)
555{
556 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
557
558 uint64_t const uTag = (uAddress << 16) >> (X86_PAGE_SHIFT + 16);
559 uint32_t const uSlot = (uint8_t)uTag;
560 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
561 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
562 Tlbe.uTag == (uTag | pTlb->uTlbRevision) ? "match"
563 : (Tlbe.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
564 iemR3InfoTlbPrintSlot(pHlp, pTlb, &Tlbe, uSlot);
565}
566
567
568/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
569static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
570{
571 /*
572 * This is entirely argument driven.
573 */
574 static RTGETOPTDEF const s_aOptions[] =
575 {
576 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
577 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
578 { "all", 'A', RTGETOPT_REQ_NOTHING },
579 { "--all", 'A', RTGETOPT_REQ_NOTHING },
580 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
581 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
582 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
583 };
584
585 char szDefault[] = "-A";
586 char *papszDefaults[2] = { szDefault, NULL };
587 if (cArgs == 0)
588 {
589 cArgs = 1;
590 papszArgs = papszDefaults;
591 }
592
593 RTGETOPTSTATE State;
594 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
595 AssertRCReturnVoid(rc);
596
597 bool fNeedHeader = true;
598 bool fAddressMode = true;
599 PVMCPU pVCpu = VMMGetCpu(pVM);
600 if (!pVCpu)
601 pVCpu = VMMGetCpuById(pVM, 0);
602
603 RTGETOPTUNION ValueUnion;
604 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
605 {
606 switch (rc)
607 {
608 case 'c':
609 if (ValueUnion.u32 >= pVM->cCpus)
610 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
611 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
612 {
613 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
614 fNeedHeader = true;
615 }
616 break;
617
618 case 'a':
619 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
620 ValueUnion.u64, &fNeedHeader);
621 fAddressMode = true;
622 break;
623
624 case 'A':
625 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
626 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), &fNeedHeader);
627 break;
628
629 case 'r':
630 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
631 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, &fNeedHeader);
632 fAddressMode = false;
633 break;
634
635 case 's':
636 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
637 ValueUnion.u32, 1, &fNeedHeader);
638 fAddressMode = false;
639 break;
640
641 case VINF_GETOPT_NOT_OPTION:
642 if (fAddressMode)
643 {
644 uint64_t uAddr;
645 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
646 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
647 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
648 uAddr, &fNeedHeader);
649 else
650 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
651 }
652 else
653 {
654 uint32_t uSlot;
655 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
656 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
657 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
658 uSlot, 1, &fNeedHeader);
659 else
660 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
661 }
662 break;
663
664 case 'h':
665 pHlp->pfnPrintf(pHlp,
666 "Usage: info %ctlb [options]\n"
667 "\n"
668 "Options:\n"
669 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
670 " Selects the CPU which TLBs we're looking at. Default: Caller / 0\n"
671 " -A, --all, all\n"
672 " Display all the TLB entries (default if no other args).\n"
673 " -a<virt>, --address=<virt>\n"
674 " Shows the TLB entry for the specified guest virtual address.\n"
675 " -r<slot:count>, --range=<slot:count>\n"
676 " Shows the TLB entries for the specified slot range.\n"
677 " -s<slot>,--slot=<slot>\n"
678 " Shows the given TLB slot.\n"
679 "\n"
680 "Non-options are interpreted according to the last -a, -r or -s option,\n"
681 "defaulting to addresses if not preceeded by any of those options.\n"
682 , fITlb ? 'i' : 'd');
683 return;
684
685 default:
686 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
687 return;
688 }
689 }
690}
691
692
693/**
694 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
695 */
696static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
697{
698 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
699}
700
701
702/**
703 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
704 */
705static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
706{
707 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
708}
709
710
711#ifdef VBOX_WITH_DEBUGGER
712
/** @callback_method_impl{FNDBGCCMD,
 * Implements the 'iemflushtlb' command (registered below in
 * iemR3RegisterDebuggerCommands), flushing the IEM TLBs of the current
 * debugger CPU. }
 */
static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
    PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
    if (pVCpu)
    {
        /* Run the flush on the target EMT as a priority request and wait for it. */
        VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAll, 1, pVCpu);
        return VINF_SUCCESS;
    }
    RT_NOREF(paArgs, cArgs); /* Command takes no arguments; keep compilers quiet. */
    return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
}
728
729
730/**
731 * Called by IEMR3Init to register debugger commands.
732 */
733static void iemR3RegisterDebuggerCommands(void)
734{
735 /*
736 * Register debugger commands.
737 */
738 static DBGCCMD const s_aCmds[] =
739 {
740 {
741 /* .pszCmd = */ "iemflushtlb",
742 /* .cArgsMin = */ 0,
743 /* .cArgsMax = */ 0,
744 /* .paArgDescs = */ NULL,
745 /* .cArgDescs = */ 0,
746 /* .fFlags = */ 0,
747 /* .pfnHandler = */ iemR3DbgFlushTlbs,
748 /* .pszSyntax = */ "",
749 /* .pszDescription = */ "Flushed the code and data TLBs"
750 },
751 };
752
753 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
754 AssertLogRelRC(rc);
755}
756
757#endif
758
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette