VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp@ 58568

Last change on this file since 58568 was 58568, checked in by vboxsync on 2015-11-04

VMM: Added intel broadwell CPU profile.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 39.9 KB
1/* $Id: CPUMR3Db.cpp 58568 2015-11-04 14:20:54Z vboxsync $ */
2/** @file
3 * CPUM - CPU database part.
4 */
5
6/*
7 * Copyright (C) 2013-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include "CPUMInternal.h"
25#include <VBox/vmm/vm.h>
26#include <VBox/vmm/mm.h>
27
28#include <VBox/err.h>
29#include <iprt/asm-amd64-x86.h>
30#include <iprt/mem.h>
31#include <iprt/string.h>
32
33
34/*********************************************************************************************************************************
35* Structures and Typedefs *
36*********************************************************************************************************************************/
37typedef struct CPUMDBENTRY
38{
39 /** The CPU name. */
40 const char *pszName;
41 /** The full CPU name. */
42 const char *pszFullName;
43 /** The CPU vendor (CPUMCPUVENDOR). */
44 uint8_t enmVendor;
45 /** The CPU family. */
46 uint8_t uFamily;
47 /** The CPU model. */
48 uint8_t uModel;
49 /** The CPU stepping. */
50 uint8_t uStepping;
51 /** The microarchitecture. */
52 CPUMMICROARCH enmMicroarch;
53 /** Scalable bus frequency used for reporting other frequencies. */
54 uint64_t uScalableBusFreq;
55 /** Flags (TBD). */
56 uint32_t fFlags;
57 /** The maximum physical address width of the CPU. This should correspond to
58 * the value in CPUID leaf 0x80000008 when present. */
59 uint8_t cMaxPhysAddrWidth;
60 /** Pointer to an array of CPUID leaves. */
61 PCCPUMCPUIDLEAF paCpuIdLeaves;
62 /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
63 uint32_t cCpuIdLeaves;
64 /** The method used to deal with unknown CPUID leaves. */
65 CPUMUNKNOWNCPUID enmUnknownCpuId;
66 /** The default unknown CPUID value. */
67 CPUMCPUID DefUnknownCpuId;
68
69 /** MSR mask. Several microarchitectures ignore the higher bits of the MSR address. */
70 uint32_t fMsrMask;
71
72 /** The number of ranges in the table pointed to by paMsrRanges. */
73 uint32_t cMsrRanges;
74 /** MSR ranges for this CPU. */
75 PCCPUMMSRRANGE paMsrRanges;
76} CPUMDBENTRY;
77
78
79/*********************************************************************************************************************************
80* Defined Constants And Macros *
81*********************************************************************************************************************************/
82
83/** @def NULL_ALONE
84 * For eliminating an unnecessary data dependency in standalone builds (for
85 * VBoxSVC). */
86/** @def ZERO_ALONE
87 * For eliminating an unnecessary data size dependency in standalone builds (for
88 * VBoxSVC). */
89#ifndef CPUM_DB_STANDALONE
90# define NULL_ALONE(a_aTable) a_aTable
91# define ZERO_ALONE(a_cTable) a_cTable
92#else
93# define NULL_ALONE(a_aTable) NULL
94# define ZERO_ALONE(a_cTable) 0
95#endif
96
97
98/** @name Short macros for the MSR range entries.
99 *
100 * These are rather cryptic, but this is to reduce the attack on the right
101 * margin.
102 *
103 * @{ */
104/** Alias one MSR onto another (a_uTarget). */
105#define MAL(a_uMsr, a_szName, a_uTarget) \
106 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_MsrAlias, kCpumMsrWrFn_MsrAlias, 0, a_uTarget, 0, 0, a_szName)
107/** Functions handle everything. */
108#define MFN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
109 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
110/** Functions handle everything, with GP mask. */
111#define MFG(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrGpMask) \
112 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, a_fWrGpMask, a_szName)
113/** Function handlers, read-only. */
114#define MFO(a_uMsr, a_szName, a_enmRdFnSuff) \
115 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_ReadOnly, 0, 0, 0, UINT64_MAX, a_szName)
116/** Function handlers, ignore all writes. */
117#define MFI(a_uMsr, a_szName, a_enmRdFnSuff) \
118 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_IgnoreWrite, 0, 0, UINT64_MAX, 0, a_szName)
119/** Function handlers, with value. */
120#define MFV(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue) \
121 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, 0, 0, a_szName)
122/** Function handlers, with write ignore mask. */
123#define MFW(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrIgnMask) \
124 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, a_fWrIgnMask, 0, a_szName)
125/** Function handlers, extended version. */
126#define MFX(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
127 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
128/** Function handlers, with CPUMCPU storage variable. */
129#define MFS(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember) \
130 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
131 RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, 0, 0, a_szName)
132/** Function handlers, with CPUMCPU storage variable, ignore mask and GP mask. */
133#define MFZ(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember, a_fWrIgnMask, a_fWrGpMask) \
134 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
135 RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, a_fWrIgnMask, a_fWrGpMask, a_szName)
136/** Read-only fixed value. */
137#define MVO(a_uMsr, a_szName, a_uValue) \
138 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
139/** Read-only fixed value, ignores all writes. */
140#define MVI(a_uMsr, a_szName, a_uValue) \
141 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
142/** Read fixed value, ignore writes outside GP mask. */
143#define MVG(a_uMsr, a_szName, a_uValue, a_fWrGpMask) \
144 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, 0, a_fWrGpMask, a_szName)
145/** Read fixed value, extended version with both GP and ignore masks. */
146#define MVX(a_uMsr, a_szName, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
147 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
148/** The short form, no CPUM backing. */
149#define MSN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
150 RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
151 a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
152
153/** Range: Functions handle everything. */
154#define RFN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
155 RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
156/** Range: Read fixed value, read-only. */
157#define RVO(a_uFirst, a_uLast, a_szName, a_uValue) \
158 RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
159/** Range: Read fixed value, ignore writes. */
160#define RVI(a_uFirst, a_uLast, a_szName, a_uValue) \
161 RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
162/** Range: The short form, no CPUM backing. */
163#define RSN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
164 RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
165 a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
166
167/** Internal form used by the macros. */
168#ifdef VBOX_WITH_STATISTICS
169# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
170 { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName, \
171 { 0 }, { 0 }, { 0 }, { 0 } }
172#else
173# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
174 { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName }
175#endif
176/** @} */
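/*
 * Illustrative expansion (derived from the RINT definition above; not part of
 * the original table text): in a build without VBOX_WITH_STATISTICS, the
 * fudge-table line
 *      MVI(0x0000008b, "BIOS_SIGN", 0)
 * expands to a CPUMMSRRANGE initializer along the lines of
 *      { 0x8b, 0x8b, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite,
 *        0, 0, 0, UINT64_MAX, 0, "BIOS_SIGN" }
 * i.e. reads return the fixed value 0 and every write is silently ignored;
 * the trailing fields are the write-ignore mask (all ones), the write-GP mask
 * (zero) and the name. The exact member layout is defined by CPUMMSRRANGE in
 * CPUMInternal.h.
 */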
177
178
179#include "cpus/Intel_Core_i7_5600U.h"
180#include "cpus/Intel_Core_i7_3960X.h"
181#include "cpus/Intel_Core_i5_3570.h"
182#include "cpus/Intel_Core_i7_2635QM.h"
183#include "cpus/Intel_Xeon_X5482_3_20GHz.h"
184#include "cpus/Intel_Pentium_M_processor_2_00GHz.h"
185#include "cpus/Intel_Pentium_4_3_00GHz.h"
186#include "cpus/Intel_Atom_330_1_60GHz.h"
187
188#include "cpus/AMD_FX_8150_Eight_Core.h"
189#include "cpus/AMD_Phenom_II_X6_1100T.h"
190#include "cpus/Quad_Core_AMD_Opteron_2384.h"
191#include "cpus/AMD_Athlon_64_X2_Dual_Core_4200.h"
192#include "cpus/AMD_Athlon_64_3200.h"
193
194#include "cpus/VIA_QuadCore_L4700_1_2_GHz.h"
195
196
197
198/**
199 * The database entries.
200 *
201 * 1. The first entry is special. It is the fallback for unknown
202 * processors. Thus, it better be pretty representative.
203 *
204 * 2. The first entry for a CPU vendor is likewise important as it is
205 * the default entry for that vendor.
206 *
207 * Generally we put the most recent CPUs first, since these tend to have the
208 * most complicated and backwards compatible list of MSRs.
209 */
210static CPUMDBENTRY const * const g_apCpumDbEntries[] =
211{
212#ifdef VBOX_CPUDB_Intel_Core_i7_5600U
213 &g_Entry_Intel_Core_i7_5600U,
214#endif
215#ifdef VBOX_CPUDB_Intel_Core_i5_3570
216 &g_Entry_Intel_Core_i5_3570,
217#endif
218#ifdef VBOX_CPUDB_Intel_Core_i7_3960X
219 &g_Entry_Intel_Core_i7_3960X,
220#endif
221#ifdef VBOX_CPUDB_Intel_Core_i7_2635QM
222 &g_Entry_Intel_Core_i7_2635QM,
223#endif
224#ifdef VBOX_CPUDB_Intel_Atom_330_1_60GHz
225 &g_Entry_Intel_Atom_330_1_60GHz,
226#endif
227#ifdef VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz
228 &g_Entry_Intel_Pentium_M_processor_2_00GHz,
229#endif
230#ifdef VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz
231 &g_Entry_Intel_Xeon_X5482_3_20GHz,
232#endif
233#ifdef VBOX_CPUDB_Intel_Pentium_4_3_00GHz
234 &g_Entry_Intel_Pentium_4_3_00GHz,
235#endif
236
237#ifdef VBOX_CPUDB_AMD_FX_8150_Eight_Core
238 &g_Entry_AMD_FX_8150_Eight_Core,
239#endif
240#ifdef VBOX_CPUDB_AMD_Phenom_II_X6_1100T
241 &g_Entry_AMD_Phenom_II_X6_1100T,
242#endif
243#ifdef VBOX_CPUDB_Quad_Core_AMD_Opteron_2384
244 &g_Entry_Quad_Core_AMD_Opteron_2384,
245#endif
246#ifdef VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200
247 &g_Entry_AMD_Athlon_64_X2_Dual_Core_4200,
248#endif
249#ifdef VBOX_CPUDB_AMD_Athlon_64_3200
250 &g_Entry_AMD_Athlon_64_3200,
251#endif
252
253#ifdef VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz
254 &g_Entry_VIA_QuadCore_L4700_1_2_GHz,
255#endif
256};
257
258
259#ifndef CPUM_DB_STANDALONE
260
261/**
262 * Binary search used by cpumR3MsrRangesInsert; it has some special properties
263 * with respect to mismatches.
264 *
265 * @returns Insert location.
266 * @param paMsrRanges The MSR ranges to search.
267 * @param cMsrRanges The number of MSR ranges.
268 * @param uMsr What to search for.
269 */
270static uint32_t cpumR3MsrRangesBinSearch(PCCPUMMSRRANGE paMsrRanges, uint32_t cMsrRanges, uint32_t uMsr)
271{
272 if (!cMsrRanges)
273 return 0;
274
275 uint32_t iStart = 0;
276 uint32_t iLast = cMsrRanges - 1;
277 for (;;)
278 {
279 uint32_t i = iStart + (iLast - iStart + 1) / 2;
280 if ( uMsr >= paMsrRanges[i].uFirst
281 && uMsr <= paMsrRanges[i].uLast)
282 return i;
283 if (uMsr < paMsrRanges[i].uFirst)
284 {
285 if (i <= iStart)
286 return i;
287 iLast = i - 1;
288 }
289 else
290 {
291 if (i >= iLast)
292 {
293 if (i < cMsrRanges)
294 i++;
295 return i;
296 }
297 iStart = i + 1;
298 }
299 }
300}
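/*
 * Worked example (for illustration; not in the original source): with two
 * ranges [0x10..0x1f] and [0x40..0x4f] in the table, the search behaves as
 * follows:
 *      cpumR3MsrRangesBinSearch(pa, 2, 0x12) -> 0   (inside the first range)
 *      cpumR3MsrRangesBinSearch(pa, 2, 0x30) -> 1   (slot between the ranges)
 *      cpumR3MsrRangesBinSearch(pa, 2, 0x50) -> 2   (slot past the end)
 * So on a miss the return value is the index at which a new range starting at
 * uMsr belongs, which is exactly what cpumR3MsrRangesInsert needs.
 */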
301
302
303/**
304 * Ensures that there is space for at least @a cNewRanges in the table,
305 * reallocating the table if necessary.
306 *
307 * @returns Pointer to the MSR ranges on success, NULL on failure. On failure
308 * @a *ppaMsrRanges is freed and set to NULL.
309 * @param pVM The cross context VM structure. If NULL,
310 * use the process heap, otherwise the VM's hyper heap.
311 * @param ppaMsrRanges The variable pointing to the ranges (input/output).
312 * @param cMsrRanges The current number of ranges.
313 * @param cNewRanges The number of ranges to be added.
314 */
315static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
316{
317 uint32_t cMsrRangesAllocated;
318 if (!pVM)
319 cMsrRangesAllocated = RT_ALIGN_32(cMsrRanges, 16);
320 else
321 {
322 /*
323 * We're using the hyper heap now, but when the range array was copied over to it from
324 * the host-context heap, we only copy the exact size and not the ensured size.
325 * See @bugref{7270}.
326 */
327 cMsrRangesAllocated = cMsrRanges;
328 }
329 if (cMsrRangesAllocated < cMsrRanges + cNewRanges)
330 {
331 void *pvNew;
332 uint32_t cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
333 if (pVM)
334 {
335 Assert(ppaMsrRanges == &pVM->cpum.s.GuestInfo.paMsrRangesR3);
336 Assert(cMsrRanges == pVM->cpum.s.GuestInfo.cMsrRanges);
337
338 size_t cb = cMsrRangesAllocated * sizeof(**ppaMsrRanges);
339 size_t cbNew = cNew * sizeof(**ppaMsrRanges);
340 int rc = MMR3HyperRealloc(pVM, *ppaMsrRanges, cb, 32, MM_TAG_CPUM_MSRS, cbNew, &pvNew);
341 if (RT_FAILURE(rc))
342 {
343 *ppaMsrRanges = NULL;
344 pVM->cpum.s.GuestInfo.paMsrRangesR0 = NIL_RTR0PTR;
345 pVM->cpum.s.GuestInfo.paMsrRangesRC = NIL_RTRCPTR;
346 LogRel(("CPUM: cpumR3MsrRangesEnsureSpace: MMR3HyperRealloc failed. rc=%Rrc\n", rc));
347 return NULL;
348 }
349 *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
350 }
351 else
352 {
353 pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
354 if (!pvNew)
355 {
356 RTMemFree(*ppaMsrRanges);
357 *ppaMsrRanges = NULL;
358 return NULL;
359 }
360 }
361 *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
362 }
363
364 if (pVM)
365 {
366 /* Update R0 and RC pointers. */
367 Assert(ppaMsrRanges == &pVM->cpum.s.GuestInfo.paMsrRangesR3);
368 pVM->cpum.s.GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, *ppaMsrRanges);
369 pVM->cpum.s.GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, *ppaMsrRanges);
370 }
371
372 return *ppaMsrRanges;
373}
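/*
 * Illustrative note on the growth policy (derived from the code above): on
 * the process heap the existing allocation is assumed to be rounded up to a
 * multiple of 16 entries, so growing a 10-entry table by one range needs no
 * reallocation (RT_ALIGN_32(10, 16) == 16), while growing a 16-entry table by
 * one reallocates to RT_ALIGN_32(17, 16) == 32 entries. On the hyper heap the
 * allocated size is taken to be exactly cMsrRanges (see @bugref{7270}), so
 * any growth goes through MMR3HyperRealloc.
 */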
374
375
376/**
377 * Inserts a new MSR range into a sorted MSR range array.
378 *
379 * If the new MSR range overlaps existing ranges, the existing ones will be
380 * adjusted/removed to fit in the new one.
381 *
382 * @returns VBox status code.
383 * @retval VINF_SUCCESS
384 * @retval VERR_NO_MEMORY
385 *
386 * @param pVM The cross context VM structure. If NULL,
387 * use the process heap, otherwise the VM's hyper heap.
388 * @param ppaMsrRanges The variable pointing to the ranges (input/output).
389 * Must be NULL if using the hyper heap.
390 * @param pcMsrRanges The variable holding number of ranges. Must be NULL
391 * if using the hyper heap.
392 * @param pNewRange The new range.
393 */
394int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange)
395{
396 Assert(pNewRange->uLast >= pNewRange->uFirst);
397 Assert(pNewRange->enmRdFn > kCpumMsrRdFn_Invalid && pNewRange->enmRdFn < kCpumMsrRdFn_End);
398 Assert(pNewRange->enmWrFn > kCpumMsrWrFn_Invalid && pNewRange->enmWrFn < kCpumMsrWrFn_End);
399
400 /*
401 * Validate and use the VM's MSR ranges array if we are using the hyper heap.
402 */
403 if (pVM)
404 {
405 AssertReturn(!ppaMsrRanges, VERR_INVALID_PARAMETER);
406 AssertReturn(!pcMsrRanges, VERR_INVALID_PARAMETER);
407
408 ppaMsrRanges = &pVM->cpum.s.GuestInfo.paMsrRangesR3;
409 pcMsrRanges = &pVM->cpum.s.GuestInfo.cMsrRanges;
410 }
411
412 uint32_t cMsrRanges = *pcMsrRanges;
413 PCPUMMSRRANGE paMsrRanges = *ppaMsrRanges;
414
415 /*
416 * Optimize the linear insertion case where we add new entries at the end.
417 */
418 if ( cMsrRanges > 0
419 && paMsrRanges[cMsrRanges - 1].uLast < pNewRange->uFirst)
420 {
421 paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
422 if (!paMsrRanges)
423 return VERR_NO_MEMORY;
424 paMsrRanges[cMsrRanges] = *pNewRange;
425 *pcMsrRanges += 1;
426 }
427 else
428 {
429 uint32_t i = cpumR3MsrRangesBinSearch(paMsrRanges, cMsrRanges, pNewRange->uFirst);
430 Assert(i == cMsrRanges || pNewRange->uFirst <= paMsrRanges[i].uLast);
431 Assert(i == 0 || pNewRange->uFirst > paMsrRanges[i - 1].uLast);
432
433 /*
434 * Adding an entirely new entry?
435 */
436 if ( i >= cMsrRanges
437 || pNewRange->uLast < paMsrRanges[i].uFirst)
438 {
439 paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
440 if (!paMsrRanges)
441 return VERR_NO_MEMORY;
442 if (i < cMsrRanges)
443 memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
444 paMsrRanges[i] = *pNewRange;
445 *pcMsrRanges += 1;
446 }
447 /*
448 * Replace existing entry?
449 */
450 else if ( pNewRange->uFirst == paMsrRanges[i].uFirst
451 && pNewRange->uLast == paMsrRanges[i].uLast)
452 paMsrRanges[i] = *pNewRange;
453 /*
454 * Splitting an existing entry?
455 */
456 else if ( pNewRange->uFirst > paMsrRanges[i].uFirst
457 && pNewRange->uLast < paMsrRanges[i].uLast)
458 {
459 paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 2);
460 if (!paMsrRanges)
461 return VERR_NO_MEMORY;
462 if (i < cMsrRanges)
463 memmove(&paMsrRanges[i + 2], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
464 paMsrRanges[i + 1] = *pNewRange;
465 paMsrRanges[i + 2] = paMsrRanges[i];
466 paMsrRanges[i ].uLast = pNewRange->uFirst - 1;
467 paMsrRanges[i + 2].uFirst = pNewRange->uLast + 1;
468 *pcMsrRanges += 2;
469 }
470 /*
471 * Complicated scenarios that can affect more than one range.
472 *
473 * The current code does not optimize memmove calls when replacing
474 * one or more existing ranges, because it's tedious to deal with and
475 * not expected to be a frequent usage scenario.
476 */
477 else
478 {
479 /* Adjust start of first match? */
480 if ( pNewRange->uFirst <= paMsrRanges[i].uFirst
481 && pNewRange->uLast < paMsrRanges[i].uLast)
482 paMsrRanges[i].uFirst = pNewRange->uLast + 1;
483 else
484 {
485 /* Adjust end of first match? */
486 if (pNewRange->uFirst > paMsrRanges[i].uFirst)
487 {
488 Assert(paMsrRanges[i].uLast >= pNewRange->uFirst);
489 paMsrRanges[i].uLast = pNewRange->uFirst - 1;
490 i++;
491 }
492 /* Replace the whole first match (lazy bird). */
493 else
494 {
495 if (i + 1 < cMsrRanges)
496 memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
497 cMsrRanges = *pcMsrRanges -= 1;
498 }
499
500 /* Does the new range affect more ranges? */
501 while ( i < cMsrRanges
502 && pNewRange->uLast >= paMsrRanges[i].uFirst)
503 {
504 if (pNewRange->uLast < paMsrRanges[i].uLast)
505 {
506 /* Adjust the start of it, then we're done. */
507 paMsrRanges[i].uFirst = pNewRange->uLast + 1;
508 break;
509 }
510
511 /* Remove it entirely. */
512 if (i + 1 < cMsrRanges)
513 memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
514 cMsrRanges = *pcMsrRanges -= 1;
515 }
516 }
517
518 /* Now, perform a normal insertion. */
519 paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
520 if (!paMsrRanges)
521 return VERR_NO_MEMORY;
522 if (i < cMsrRanges)
523 memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
524 paMsrRanges[i] = *pNewRange;
525 *pcMsrRanges += 1;
526 }
527 }
528
529 return VINF_SUCCESS;
530}
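/*
 * Worked example (for illustration; not in the original source): inserting
 * the range [0x12..0x13] into a table that already contains [0x10..0x1f]
 * takes the splitting case above and leaves three ranges behind:
 *      [0x10..0x11]  (front of the old range, uLast adjusted)
 *      [0x12..0x13]  (the new range)
 *      [0x14..0x1f]  (copy of the old range, uFirst adjusted)
 * Inserting [0x10..0x1f] itself would instead hit the "replace existing
 * entry" case and simply overwrite the matching slot.
 */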
531
532
533/**
534 * Worker for cpumR3MsrApplyFudge that applies one table.
535 *
536 * @returns VBox status code.
537 * @param pVM The cross context VM structure.
538 * @param paRanges Array of MSRs to fudge.
539 * @param cRanges Number of MSRs in the array.
540 */
541static int cpumR3MsrApplyFudgeTable(PVM pVM, PCCPUMMSRRANGE paRanges, size_t cRanges)
542{
543 for (uint32_t i = 0; i < cRanges; i++)
544 if (!cpumLookupMsrRange(pVM, paRanges[i].uFirst))
545 {
546 LogRel(("CPUM: MSR fudge: %#010x %s\n", paRanges[i].uFirst, paRanges[i].szName));
547 int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
548 &paRanges[i]);
549 if (RT_FAILURE(rc))
550 return rc;
551 }
552 return VINF_SUCCESS;
553}
554
555
556/**
557 * Fudges the MSRs that guests are known to access in some odd cases.
558 *
559 * A typical example is a VM that has been moved between different hosts where
560 * for instance the CPU vendor differs.
561 *
562 * @returns VBox status code.
563 * @param pVM The cross context VM structure.
564 */
565int cpumR3MsrApplyFudge(PVM pVM)
566{
567 /*
568 * Basic.
569 */
570 static CPUMMSRRANGE const s_aFudgeMsrs[] =
571 {
572 MFO(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr),
573 MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX),
574 MVO(0x00000017, "IA32_PLATFORM_ID", 0),
575 MFN(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase),
576 MVI(0x0000008b, "BIOS_SIGN", 0),
577 MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0),
578 MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x005, 0, 0),
579 MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, ~(uint64_t)UINT32_MAX, 0),
580 MFN(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable),
581 MFN(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl),
582 MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp),
583 MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp),
584 MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp),
585 MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp),
586 MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
587 MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, ~(uint64_t)0xc07),
588 MFN(0x00000400, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
589 };
590 int rc = cpumR3MsrApplyFudgeTable(pVM, &s_aFudgeMsrs[0], RT_ELEMENTS(s_aFudgeMsrs));
591 AssertLogRelRCReturn(rc, rc);
592
593 /*
594 * XP might mistake Opterons and other newer CPUs for P4s.
595 */
596 if (pVM->cpum.s.GuestFeatures.uFamily >= 0xf)
597 {
598 static CPUMMSRRANGE const s_aP4FudgeMsrs[] =
599 {
600 MFX(0x0000002c, "P4_EBC_FREQUENCY_ID", IntelP4EbcFrequencyId, IntelP4EbcFrequencyId, 0xf12010f, UINT64_MAX, 0),
601 };
602 rc = cpumR3MsrApplyFudgeTable(pVM, &s_aP4FudgeMsrs[0], RT_ELEMENTS(s_aP4FudgeMsrs));
603 AssertLogRelRCReturn(rc, rc);
604 }
605
606 return rc;
607}
608
609
610/**
611 * Do we consider @a enmConsider a better match for @a enmTarget than
612 * @a enmFound?
613 *
614 * Only called when @a enmConsider isn't exactly what we're looking for.
615 *
616 * @returns true/false.
617 * @param enmConsider The new microarch to consider.
618 * @param enmTarget The target microarch.
619 * @param enmFound The best microarch match we've found thus far.
620 */
621DECLINLINE(bool) cpumR3DbIsBetterMarchMatch(CPUMMICROARCH enmConsider, CPUMMICROARCH enmTarget, CPUMMICROARCH enmFound)
622{
623 Assert(enmConsider != enmTarget);
624
625 /*
626 * If 'found' is already an exact microarch match, don't bother with enmConsider.
627 */
628 if (enmFound == enmTarget)
629 return false;
630
631 /*
632 * Found is below: Pick 'consider' if it's closer to the target or above it.
633 */
634 if (enmFound < enmTarget)
635 return enmConsider > enmFound;
636
637 /*
638 * Found is above: Pick 'consider' if it's also above (paranoia: or equal to)
639 * the target, but closer to it than 'found'.
640 */
641 return enmConsider >= enmTarget && enmConsider < enmFound;
642}
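/*
 * Illustrative note (derived from the comparison above; not in the original
 * source): treating the CPUMMICROARCH values as ordinals with enmTarget == 7:
 *      enmFound == 5 (below target): consider == 6 or 9 both replace it
 *      (anything greater than 'found' wins), while consider == 4 does not;
 *      enmFound == 9 (above target): consider == 8 replaces it (at or above
 *      the target and closer), while consider == 6 does not.
 */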
643
644
645/**
646 * Do we consider @a enmConsider a better match for @a enmTarget than
647 * @a enmFound?
648 *
649 * Only called for Intel family 06h CPUs.
650 *
651 * @returns true/false.
652 * @param enmConsider The new microarch to consider.
653 * @param enmTarget The target microarch.
654 * @param enmFound The best microarch match we've found thus far.
655 */
656static bool cpumR3DbIsBetterIntelFam06Match(CPUMMICROARCH enmConsider, CPUMMICROARCH enmTarget, CPUMMICROARCH enmFound)
657{
658 /* Check intel family 06h claims. */
659 AssertReturn(enmConsider >= kCpumMicroarch_Intel_P6_Core_Atom_First && enmConsider <= kCpumMicroarch_Intel_P6_Core_Atom_End,
660 false);
661 AssertReturn(enmTarget >= kCpumMicroarch_Intel_P6_Core_Atom_First && enmTarget <= kCpumMicroarch_Intel_P6_Core_Atom_End,
662 false);
663
664 /* Put matches out of the way. */
665 if (enmConsider == enmTarget)
666 return true;
667 if (enmFound == enmTarget)
668 return false;
669
670 /* If found isn't a family 06h march, whatever we're considering must be a better choice. */
671 if ( enmFound < kCpumMicroarch_Intel_P6_Core_Atom_First
672 || enmFound > kCpumMicroarch_Intel_P6_Core_Atom_End)
673 return true;
674
675 /*
676 * The family 06h stuff is split into three categories:
677 * - Common P6 heritage
678 * - Core
679 * - Atom
680 *
681 * Determine which of the three arguments are Atom microarchs, because that's
682 * all we need to make the right choice.
683 */
684 bool const fConsiderAtom = enmConsider >= kCpumMicroarch_Intel_Atom_First;
685 bool const fTargetAtom = enmTarget >= kCpumMicroarch_Intel_Atom_First;
686 bool const fFoundAtom = enmFound >= kCpumMicroarch_Intel_Atom_First;
687
688 /*
689 * Want atom:
690 */
691 if (fTargetAtom)
692 {
693 /* Pick the atom if we've got one of each.*/
694 if (fConsiderAtom != fFoundAtom)
695 return fConsiderAtom;
696 /* If we haven't got any atoms under consideration, pick a P6 or the earlier core.
697 Note! Not entirely sure Dothan is the best choice, but it'll do for now. */
698 if (!fConsiderAtom)
699 {
700 if (enmConsider > enmFound)
701 return enmConsider <= kCpumMicroarch_Intel_P6_M_Dothan;
702 return enmFound > kCpumMicroarch_Intel_P6_M_Dothan;
703 }
704 /* else: same category, default comparison rules. */
705 Assert(fConsiderAtom && fFoundAtom);
706 }
707 /*
708 * Want non-atom:
709 */
710 /* Pick the non-atom if we've got one of each. */
711 else if (fConsiderAtom != fFoundAtom)
712 return fFoundAtom;
713 /* If we've only got atoms under consideration, pick the older one just to pick something. */
714 else if (fConsiderAtom)
715 return enmConsider < enmFound;
716 else
717 Assert(!fConsiderAtom && !fFoundAtom);
718
719 /*
720 * Same basic category. Do same compare as caller.
721 */
722 return cpumR3DbIsBetterMarchMatch(enmConsider, enmTarget, enmFound);
723}
724
725
726int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo)
727{
728 CPUMDBENTRY const *pEntry = NULL;
729 int rc;
730
731 if (!strcmp(pszName, "host"))
732 {
733 /*
734 * Create a CPU database entry for the host CPU. This means getting
735 * the CPUID bits from the real CPU and grabbing the closest matching
736 * database entry for MSRs.
737 */
738 rc = CPUMR3CpuIdDetectUnknownLeafMethod(&pInfo->enmUnknownCpuIdMethod, &pInfo->DefCpuId);
739 if (RT_FAILURE(rc))
740 return rc;
741 rc = CPUMR3CpuIdCollectLeaves(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
742 if (RT_FAILURE(rc))
743 return rc;
744
745 /* Lookup database entry for MSRs. */
746 CPUMCPUVENDOR const enmVendor = CPUMR3CpuIdDetectVendorEx(pInfo->paCpuIdLeavesR3[0].uEax,
747 pInfo->paCpuIdLeavesR3[0].uEbx,
748 pInfo->paCpuIdLeavesR3[0].uEcx,
749 pInfo->paCpuIdLeavesR3[0].uEdx);
750 uint32_t const uStd1Eax = pInfo->paCpuIdLeavesR3[1].uEax;
751 uint8_t const uFamily = ASMGetCpuFamily(uStd1Eax);
752 uint8_t const uModel = ASMGetCpuModel(uStd1Eax, enmVendor == CPUMCPUVENDOR_INTEL);
753 uint8_t const uStepping = ASMGetCpuStepping(uStd1Eax);
754 CPUMMICROARCH const enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx(enmVendor, uFamily, uModel, uStepping);
755
756 for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
757 {
758 CPUMDBENTRY const *pCur = g_apCpumDbEntries[i];
759 if ((CPUMCPUVENDOR)pCur->enmVendor == enmVendor)
760 {
761 /* Match against Family, Microarch, model and stepping. Except
762 for family, always match the closer with preference given to
763 the later/older ones. */
764 if (pCur->uFamily == uFamily)
765 {
766 if (pCur->enmMicroarch == enmMicroarch)
767 {
768 if (pCur->uModel == uModel)
769 {
770 if (pCur->uStepping == uStepping)
771 {
772 /* Perfect match. */
773 pEntry = pCur;
774 break;
775 }
776
777 if ( !pEntry
778 || pEntry->uModel != uModel
779 || pEntry->enmMicroarch != enmMicroarch
780 || pEntry->uFamily != uFamily)
781 pEntry = pCur;
782 else if ( pCur->uStepping >= uStepping
783 ? pCur->uStepping < pEntry->uStepping || pEntry->uStepping < uStepping
784 : pCur->uStepping > pEntry->uStepping)
785 pEntry = pCur;
786 }
787 else if ( !pEntry
788 || pEntry->enmMicroarch != enmMicroarch
789 || pEntry->uFamily != uFamily)
790 pEntry = pCur;
791 else if ( pCur->uModel >= uModel
792 ? pCur->uModel < pEntry->uModel || pEntry->uModel < uModel
793 : pCur->uModel > pEntry->uModel)
794 pEntry = pCur;
795 }
796 else if ( !pEntry
797 || pEntry->uFamily != uFamily)
798 pEntry = pCur;
799 /* Special microarch matching rules apply to Intel family 06h. */
800 else if ( enmVendor == CPUMCPUVENDOR_INTEL
801 && uFamily == 6
802 ? cpumR3DbIsBetterIntelFam06Match(pCur->enmMicroarch, enmMicroarch, pEntry->enmMicroarch)
803 : cpumR3DbIsBetterMarchMatch(pCur->enmMicroarch, enmMicroarch, pEntry->enmMicroarch))
804 pEntry = pCur;
805 }
806 /* We don't do closeness matching on family; we use the first
807 entry for the CPU vendor instead. (P4 workaround.) */
808 else if (!pEntry)
809 pEntry = pCur;
810 }
811 }
812
813 if (pEntry)
814 LogRel(("CPUM: Matched host CPU %s %#x/%#x/%#x %s with CPU DB entry '%s' (%s %#x/%#x/%#x %s)\n",
815 CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
816 pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor), pEntry->uFamily, pEntry->uModel,
817 pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
818 else
819 {
820 pEntry = g_apCpumDbEntries[0];
821 LogRel(("CPUM: No matching processor database entry %s %#x/%#x/%#x %s, falling back on '%s'\n",
822 CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
823 pEntry->pszName));
824 }
825 }
826 else
827 {
828 /*
829 * We're supposed to be emulating a specific CPU that is included in
830 * our CPU database. The CPUID tables need to be copied onto the
831 * heap so the caller can modify them and so they can be freed like
832 * in the host case above.
833 */
834 for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
835 if (!strcmp(pszName, g_apCpumDbEntries[i]->pszName))
836 {
837 pEntry = g_apCpumDbEntries[i];
838 break;
839 }
840 if (!pEntry)
841 {
842 LogRel(("CPUM: Cannot locate any CPU by the name '%s'\n", pszName));
843 return VERR_CPUM_DB_CPU_NOT_FOUND;
844 }
845
846 pInfo->cCpuIdLeaves = pEntry->cCpuIdLeaves;
847 if (pEntry->cCpuIdLeaves)
848 {
849 pInfo->paCpuIdLeavesR3 = (PCPUMCPUIDLEAF)RTMemDup(pEntry->paCpuIdLeaves,
850 sizeof(pEntry->paCpuIdLeaves[0]) * pEntry->cCpuIdLeaves);
851 if (!pInfo->paCpuIdLeavesR3)
852 return VERR_NO_MEMORY;
853 }
854 else
855 pInfo->paCpuIdLeavesR3 = NULL;
856
857 pInfo->enmUnknownCpuIdMethod = pEntry->enmUnknownCpuId;
858 pInfo->DefCpuId = pEntry->DefUnknownCpuId;
859
860 LogRel(("CPUM: Using CPU DB entry '%s' (%s %#x/%#x/%#x %s)\n",
861 pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor),
862 pEntry->uFamily, pEntry->uModel, pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
863 }
864
865 pInfo->fMsrMask = pEntry->fMsrMask;
866 pInfo->iFirstExtCpuIdLeaf = 0; /* Set by caller. */
867 pInfo->uPadding = 0;
868 pInfo->uScalableBusFreq = pEntry->uScalableBusFreq;
869 pInfo->paCpuIdLeavesR0 = NIL_RTR0PTR;
870 pInfo->paMsrRangesR0 = NIL_RTR0PTR;
871 pInfo->paCpuIdLeavesRC = NIL_RTRCPTR;
872 pInfo->paMsrRangesRC = NIL_RTRCPTR;
873
874 /*
875 * Copy the MSR range.
876 */
877 uint32_t cMsrs = 0;
878 PCPUMMSRRANGE paMsrs = NULL;
879
880 PCCPUMMSRRANGE pCurMsr = pEntry->paMsrRanges;
881 uint32_t cLeft = pEntry->cMsrRanges;
882 while (cLeft-- > 0)
883 {
884 rc = cpumR3MsrRangesInsert(NULL /* pVM */, &paMsrs, &cMsrs, pCurMsr);
885 if (RT_FAILURE(rc))
886 {
887 Assert(!paMsrs); /* The above function frees this. */
888 RTMemFree(pInfo->paCpuIdLeavesR3);
889 pInfo->paCpuIdLeavesR3 = NULL;
890 return rc;
891 }
892 pCurMsr++;
893 }
894
895 pInfo->paMsrRangesR3 = paMsrs;
896 pInfo->cMsrRanges = cMsrs;
897 return VINF_SUCCESS;
898}
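/*
 * Hypothetical usage sketch (not in the original source; the profile name
 * string below is assumed, the exact pszName values are defined in the
 * generated cpus/*.h headers): CPUM initialization is expected to call this
 * either with "host", which synthesizes an entry from the real CPU's CPUID
 * leaves plus the closest database entry's MSRs, or with a profile name
 * matching one of the g_apCpumDbEntries entries, e.g.:
 *
 *      CPUMINFO Info;
 *      int rc = cpumR3DbGetCpuInfo("Intel Core i7-3960X", &Info);
 */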
899
900
901/**
902 * Insert an MSR range into the VM.
903 *
904 * If the new MSR range overlaps existing ranges, the existing ones will be
905 * adjusted/removed to fit in the new one.
906 *
907 * @returns VBox status code.
908 * @param pVM The cross context VM structure.
909 * @param pNewRange Pointer to the MSR range being inserted.
910 */
911VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange)
912{
913 AssertReturn(pVM, VERR_INVALID_PARAMETER);
914 AssertReturn(pNewRange, VERR_INVALID_PARAMETER);
915
916 return cpumR3MsrRangesInsert(pVM, NULL /* ppaMsrRanges */, NULL /* pcMsrRanges */, pNewRange);
917}
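/*
 * Hypothetical usage sketch (not in the original source; the MSR index and
 * name below are made up for illustration): other VMM code could register a
 * synthetic read-only MSR for the guest through this API, reusing the table
 * macros defined above:
 *
 *      static CPUMMSRRANGE const s_SyntheticMsr =
 *          MVO(0x000004f0, "HYPOTHETICAL_MSR", 0);   // made-up index
 *      int rc = CPUMR3MsrRangesInsert(pVM, &s_SyntheticMsr);
 *
 * Any overlap with existing ranges is resolved by cpumR3MsrRangesInsert as
 * described above.
 */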
918
919
920/**
921 * Register statistics for the MSRs.
922 *
923 * This must not be called before the MSRs have been finalized and moved to the
924 * hyper heap.
925 *
926 * @returns VBox status code.
927 * @param pVM The cross context VM structure.
928 */
929int cpumR3MsrRegStats(PVM pVM)
930{
931 /*
932 * Global statistics.
933 */
934 PCPUM pCpum = &pVM->cpum.s;
935 STAM_REL_REG(pVM, &pCpum->cMsrReads, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/Reads",
936 STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
937 STAM_REL_REG(pVM, &pCpum->cMsrReadsRaiseGp, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/ReadsRaisingGP",
938 STAMUNIT_OCCURENCES, "RDMSR raising #GPs, except unknown MSRs.");
939 STAM_REL_REG(pVM, &pCpum->cMsrReadsUnknown, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/ReadsUnknown",
940 STAMUNIT_OCCURENCES, "RDMSR on unknown MSRs (raises #GP).");
941 STAM_REL_REG(pVM, &pCpum->cMsrWrites, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/Writes",
942 STAMUNIT_OCCURENCES, "All WRMSRs making it to CPUM.");
943 STAM_REL_REG(pVM, &pCpum->cMsrWritesRaiseGp, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesRaisingGP",
944 STAMUNIT_OCCURENCES, "WRMSR raising #GPs, except unknown MSRs.");
945 STAM_REL_REG(pVM, &pCpum->cMsrWritesToIgnoredBits, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesToIgnoredBits",
946 STAMUNIT_OCCURENCES, "Writing of ignored bits.");
947 STAM_REL_REG(pVM, &pCpum->cMsrWritesUnknown, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesUnknown",
948 STAMUNIT_OCCURENCES, "WRMSR on unknown MSRs (raises #GP).");
949
950
951# ifdef VBOX_WITH_STATISTICS
952 /*
953 * Per range.
954 */
955 PCPUMMSRRANGE paRanges = pVM->cpum.s.GuestInfo.paMsrRangesR3;
956 uint32_t cRanges = pVM->cpum.s.GuestInfo.cMsrRanges;
957 for (uint32_t i = 0; i < cRanges; i++)
958 {
959 char szName[160];
960 ssize_t cchName;
961
962 if (paRanges[i].uFirst == paRanges[i].uLast)
963 cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%s",
964 paRanges[i].uFirst, paRanges[i].szName);
965 else
966 cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%#010x-%s",
967 paRanges[i].uFirst, paRanges[i].uLast, paRanges[i].szName);
968
969 RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-reads");
970 STAMR3Register(pVM, &paRanges[i].cReads, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RDMSR");
971
972 RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-writes");
973 STAMR3Register(pVM, &paRanges[i].cWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR");
974
975 RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-GPs");
976 STAMR3Register(pVM, &paRanges[i].cGps, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "#GPs");
977
978 RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-ign-bits-writes");
979 STAMR3Register(pVM, &paRanges[i].cIgnoredBits, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR w/ ignored bits");
980 }
981# endif /* VBOX_WITH_STATISTICS */
982
983 return VINF_SUCCESS;
984}
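/*
 * Illustrative note (derived from the format strings above; not in the
 * original source): with the naming scheme used here, the single-MSR fudge
 * range MVI(0x0000008b, "BIOS_SIGN", 0) gets its per-range statistics
 * registered under names like
 *      /CPUM/MSRs/0x0000008b-BIOS_SIGN-reads
 *      /CPUM/MSRs/0x0000008b-BIOS_SIGN-writes
 *      /CPUM/MSRs/0x0000008b-BIOS_SIGN-GPs
 *      /CPUM/MSRs/0x0000008b-BIOS_SIGN-ign-bits-writes
 * Multi-MSR ranges additionally include the last MSR index in the name.
 */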
985
986#endif /* !CPUM_DB_STANDALONE */
987