source: vbox/trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp@68155

Last change on this file since 68155 was 66403, checked in by vboxsync, 8 years ago

CPUM: Added the MXCSR mask to the CPU database and CPUM::GuestInfo as well as the host one to CPUM::fHostMxCsrMask. Need it for correctly implementing LDMXCSR, FXRSTOR and XRSTOR.

/* $Id: CPUMR3Db.cpp 66403 2017-04-03 15:21:26Z vboxsync $ */
/** @file
 * CPUM - CPU database part.
 */

/*
 * Copyright (C) 2013-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/mm.h>

#include <VBox/err.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/mem.h>
#include <iprt/string.h>


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
typedef struct CPUMDBENTRY
{
    /** The CPU name. */
    const char      *pszName;
    /** The full CPU name. */
    const char      *pszFullName;
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t         enmVendor;
    /** The CPU family. */
    uint8_t         uFamily;
    /** The CPU model. */
    uint8_t         uModel;
    /** The CPU stepping. */
    uint8_t         uStepping;
    /** The microarchitecture. */
    CPUMMICROARCH   enmMicroarch;
    /** Scalable bus frequency used for reporting other frequencies. */
    uint64_t        uScalableBusFreq;
    /** Flags - CPUDB_F_XXX. */
    uint32_t        fFlags;
    /** The maximum physical address width of the CPU.  This should correspond to
     * the value in CPUID leaf 0x80000008 when present. */
    uint8_t         cMaxPhysAddrWidth;
    /** The MXCSR mask. */
    uint32_t        fMxCsrMask;
    /** Pointer to an array of CPUID leaves. */
    PCCPUMCPUIDLEAF paCpuIdLeaves;
    /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
    uint32_t        cCpuIdLeaves;
    /** The method used to deal with unknown CPUID leaves. */
    CPUMUNKNOWNCPUID enmUnknownCpuId;
    /** The default unknown CPUID value. */
    CPUMCPUID       DefUnknownCpuId;

    /** MSR mask.  Several microarchitectures ignore the higher bits of ECX in
     * the RDMSR and WRMSR instructions. */
    uint32_t        fMsrMask;

    /** The number of ranges in the table pointed to by paMsrRanges. */
    uint32_t        cMsrRanges;
    /** MSR ranges for this CPU. */
    PCCPUMMSRRANGE  paMsrRanges;
} CPUMDBENTRY;


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** @name CPUDB_F_XXX - CPUMDBENTRY::fFlags
 * @{ */
/** Should execute all in IEM.
 * @todo Implement this - currently done in Main... */
#define CPUDB_F_EXECUTE_ALL_IN_IEM      RT_BIT_32(0)
/** @} */


/** @def NULL_ALONE
 * For eliminating an unnecessary data dependency in standalone builds (for
 * VBoxSVC). */
/** @def ZERO_ALONE
 * For eliminating an unnecessary data size dependency in standalone builds (for
 * VBoxSVC). */
#ifndef CPUM_DB_STANDALONE
# define NULL_ALONE(a_aTable)   a_aTable
# define ZERO_ALONE(a_cTable)   a_cTable
#else
# define NULL_ALONE(a_aTable)   NULL
# define ZERO_ALONE(a_cTable)   0
#endif

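/* Illustrative sketch (not from the original sources): the cpus/*.h database
   headers use these wrappers when referencing their MSR tables, so that
   standalone (VBoxSVC) builds compile without dragging in the table data.
   With a hypothetical table g_aMsrRanges_Example, an entry initializer ends
   along these lines:

        ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Example)),
        NULL_ALONE(g_aMsrRanges_Example),
 */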

/** @name Short macros for the MSR range entries.
 *
 * These are rather cryptic, but this is to reduce the attack on the right
 * margin.
 *
 * @{ */
/** Alias one MSR onto another (a_uTarget). */
#define MAL(a_uMsr, a_szName, a_uTarget) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_MsrAlias, kCpumMsrWrFn_MsrAlias, 0, a_uTarget, 0, 0, a_szName)
/** Function handles everything. */
#define MFN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Function handles everything, with GP mask. */
#define MFG(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, a_fWrGpMask, a_szName)
/** Function handlers, read-only. */
#define MFO(a_uMsr, a_szName, a_enmRdFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_ReadOnly, 0, 0, 0, UINT64_MAX, a_szName)
/** Function handlers, ignore all writes. */
#define MFI(a_uMsr, a_szName, a_enmRdFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_IgnoreWrite, 0, 0, UINT64_MAX, 0, a_szName)
/** Function handlers, with value. */
#define MFV(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, 0, 0, a_szName)
/** Function handlers, with write ignore mask. */
#define MFW(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrIgnMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, a_fWrIgnMask, 0, a_szName)
/** Function handlers, extended version. */
#define MFX(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Function handlers, with CPUMCPU storage variable. */
#define MFS(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, 0, 0, a_szName)
/** Function handlers, with CPUMCPU storage variable, ignore mask and GP mask. */
#define MFZ(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Read-only fixed value. */
#define MVO(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Read-only fixed value, ignores all writes. */
#define MVI(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Read fixed value, ignore writes outside GP mask. */
#define MVG(a_uMsr, a_szName, a_uValue, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, 0, a_fWrGpMask, a_szName)
/** Read fixed value, extended version with both GP and ignore masks. */
#define MVX(a_uMsr, a_szName, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** The short form, no CPUM backing. */
#define MSN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)

/** Range: Function handles everything. */
#define RFN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Range: Read fixed value, read-only. */
#define RVO(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Range: Read fixed value, ignore writes. */
#define RVI(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Range: The short form, no CPUM backing. */
#define RSN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)

/** Internal form used by the macros. */
#ifdef VBOX_WITH_STATISTICS
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName, \
      { 0 }, { 0 }, { 0 }, { 0 } }
#else
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName }
#endif
/** @} */

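/* Illustrative sketch (not part of the original sources): in a build without
   VBOX_WITH_STATISTICS, an entry such as MVI(0x0000008b, "BIOS_SIGN", 0)
   expands through RINT into a CPUMMSRRANGE initializer along these lines:

        { 0x0000008b, 0x0000008b, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite,
          0, 0, 0, UINT64_MAX, 0, "BIOS_SIGN" }

   i.e. a single-MSR range that reads as a fixed zero and silently swallows
   all writes (write-ignore mask UINT64_MAX, no GP mask). */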
#ifndef CPUM_DB_STANDALONE

#include "cpus/Intel_Core_i7_6700K.h"
#include "cpus/Intel_Core_i7_5600U.h"
#include "cpus/Intel_Core_i7_3960X.h"
#include "cpus/Intel_Core_i5_3570.h"
#include "cpus/Intel_Core_i7_2635QM.h"
#include "cpus/Intel_Xeon_X5482_3_20GHz.h"
#include "cpus/Intel_Pentium_M_processor_2_00GHz.h"
#include "cpus/Intel_Pentium_4_3_00GHz.h"
#include "cpus/Intel_Pentium_N3530_2_16GHz.h"
#include "cpus/Intel_Atom_330_1_60GHz.h"
#include "cpus/Intel_80386.h"
#include "cpus/Intel_80286.h"
#include "cpus/Intel_80186.h"
#include "cpus/Intel_8086.h"

#include "cpus/AMD_FX_8150_Eight_Core.h"
#include "cpus/AMD_Phenom_II_X6_1100T.h"
#include "cpus/Quad_Core_AMD_Opteron_2384.h"
#include "cpus/AMD_Athlon_64_X2_Dual_Core_4200.h"
#include "cpus/AMD_Athlon_64_3200.h"

#include "cpus/VIA_QuadCore_L4700_1_2_GHz.h"



/**
 * The database entries.
 *
 * 1. The first entry is special.  It is the fallback for unknown
 *    processors.  Thus, it had better be pretty representative.
 *
 * 2. The first entry for a CPU vendor is likewise important as it is
 *    the default entry for that vendor.
 *
 * Generally we put the most recent CPUs first, since these tend to have the
 * most complicated and backwards compatible list of MSRs.
 */
static CPUMDBENTRY const * const g_apCpumDbEntries[] =
{
#ifdef VBOX_CPUDB_Intel_Core_i7_6700K
    &g_Entry_Intel_Core_i7_6700K,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i7_5600U
    &g_Entry_Intel_Core_i7_5600U,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i5_3570
    &g_Entry_Intel_Core_i5_3570,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i7_3960X
    &g_Entry_Intel_Core_i7_3960X,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i7_2635QM
    &g_Entry_Intel_Core_i7_2635QM,
#endif
#ifdef VBOX_CPUDB_Intel_Pentium_N3530_2_16GHz
    &g_Entry_Intel_Pentium_N3530_2_16GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Atom_330_1_60GHz
    &g_Entry_Intel_Atom_330_1_60GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz
    &g_Entry_Intel_Pentium_M_processor_2_00GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz
    &g_Entry_Intel_Xeon_X5482_3_20GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Pentium_4_3_00GHz
    &g_Entry_Intel_Pentium_4_3_00GHz,
#endif
#ifdef VBOX_CPUDB_Intel_80486
    &g_Entry_Intel_80486,
#endif
#ifdef VBOX_CPUDB_Intel_80386
    &g_Entry_Intel_80386,
#endif
#ifdef VBOX_CPUDB_Intel_80286
    &g_Entry_Intel_80286,
#endif
#ifdef VBOX_CPUDB_Intel_80186
    &g_Entry_Intel_80186,
#endif
#ifdef VBOX_CPUDB_Intel_8086
    &g_Entry_Intel_8086,
#endif

#ifdef VBOX_CPUDB_AMD_FX_8150_Eight_Core
    &g_Entry_AMD_FX_8150_Eight_Core,
#endif
#ifdef VBOX_CPUDB_AMD_Phenom_II_X6_1100T
    &g_Entry_AMD_Phenom_II_X6_1100T,
#endif
#ifdef VBOX_CPUDB_Quad_Core_AMD_Opteron_2384
    &g_Entry_Quad_Core_AMD_Opteron_2384,
#endif
#ifdef VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200
    &g_Entry_AMD_Athlon_64_X2_Dual_Core_4200,
#endif
#ifdef VBOX_CPUDB_AMD_Athlon_64_3200
    &g_Entry_AMD_Athlon_64_3200,
#endif

#ifdef VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz
    &g_Entry_VIA_QuadCore_L4700_1_2_GHz,
#endif

#ifdef VBOX_CPUDB_NEC_V20
    &g_Entry_NEC_V20,
#endif
};



/**
 * Binary search used by cpumR3MsrRangesInsert, with some special properties
 * with respect to mismatches.
 *
 * @returns Insert location.
 * @param   paMsrRanges     The MSR ranges to search.
 * @param   cMsrRanges      The number of MSR ranges.
 * @param   uMsr            What to search for.
 */
static uint32_t cpumR3MsrRangesBinSearch(PCCPUMMSRRANGE paMsrRanges, uint32_t cMsrRanges, uint32_t uMsr)
{
    if (!cMsrRanges)
        return 0;

    uint32_t iStart = 0;
    uint32_t iLast  = cMsrRanges - 1;
    for (;;)
    {
        uint32_t i = iStart + (iLast - iStart + 1) / 2;
        if (   uMsr >= paMsrRanges[i].uFirst
            && uMsr <= paMsrRanges[i].uLast)
            return i;
        if (uMsr < paMsrRanges[i].uFirst)
        {
            if (i <= iStart)
                return i;
            iLast = i - 1;
        }
        else
        {
            if (i >= iLast)
            {
                if (i < cMsrRanges)
                    i++;
                return i;
            }
            iStart = i + 1;
        }
    }
}


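/* Illustrative note (not from the original sources): on a hit the function
   returns the index of the range containing uMsr; on a miss it returns the
   index at which a new range would have to be inserted, and the caller tells
   the two cases apart by re-checking the bounds of the returned entry.
   For example, with the sorted ranges { 0x10..0x1f } and { 0x40..0x4f }:

        cpumR3MsrRangesBinSearch(paRanges, 2, 0x45) -> 1   (hit)
        cpumR3MsrRangesBinSearch(paRanges, 2, 0x30) -> 1   (insert before 0x40..0x4f)
        cpumR3MsrRangesBinSearch(paRanges, 2, 0x60) -> 2   (append)
 */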
/**
 * Ensures that there is space for at least @a cNewRanges in the table,
 * reallocating the table if necessary.
 *
 * @returns Pointer to the MSR ranges on success, NULL on failure.  On failure
 *          @a *ppaMsrRanges is freed and set to NULL.
 * @param   pVM             The cross context VM structure.  If NULL,
 *                          use the process heap, otherwise the VM's hyper heap.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 * @param   cMsrRanges      The current number of ranges.
 * @param   cNewRanges      The number of ranges to be added.
 */
static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
{
    uint32_t cMsrRangesAllocated;
    if (!pVM)
        cMsrRangesAllocated = RT_ALIGN_32(cMsrRanges, 16);
    else
    {
        /*
         * We're using the hyper heap now, but when the range array was copied over to it from
         * the host-context heap, we only copied the exact size and not the ensured size.
         * See @bugref{7270}.
         */
        cMsrRangesAllocated = cMsrRanges;
    }
    if (cMsrRangesAllocated < cMsrRanges + cNewRanges)
    {
        void    *pvNew;
        uint32_t cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
        if (pVM)
        {
            Assert(ppaMsrRanges == &pVM->cpum.s.GuestInfo.paMsrRangesR3);
            Assert(cMsrRanges   == pVM->cpum.s.GuestInfo.cMsrRanges);

            size_t cb    = cMsrRangesAllocated * sizeof(**ppaMsrRanges);
            size_t cbNew = cNew * sizeof(**ppaMsrRanges);
            int rc = MMR3HyperRealloc(pVM, *ppaMsrRanges, cb, 32, MM_TAG_CPUM_MSRS, cbNew, &pvNew);
            if (RT_FAILURE(rc))
            {
                *ppaMsrRanges = NULL;
                pVM->cpum.s.GuestInfo.paMsrRangesR0 = NIL_RTR0PTR;
                pVM->cpum.s.GuestInfo.paMsrRangesRC = NIL_RTRCPTR;
                LogRel(("CPUM: cpumR3MsrRangesEnsureSpace: MMR3HyperRealloc failed. rc=%Rrc\n", rc));
                return NULL;
            }
            *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
        }
        else
        {
            pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
            if (!pvNew)
            {
                RTMemFree(*ppaMsrRanges);
                *ppaMsrRanges = NULL;
                return NULL;
            }
        }
        *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
    }

    if (pVM)
    {
        /* Update R0 and RC pointers. */
        Assert(ppaMsrRanges == &pVM->cpum.s.GuestInfo.paMsrRangesR3);
        pVM->cpum.s.GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, *ppaMsrRanges);
        pVM->cpum.s.GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, *ppaMsrRanges);
    }

    return *ppaMsrRanges;
}


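/* Illustrative note (not from the original sources): on the process heap the
   existing capacity is assumed to be the range count rounded up to a multiple
   of 16, matching the RT_ALIGN_32 growth policy above.  E.g. with
   cMsrRanges = 17 and cNewRanges = 2 the assumed capacity is 32, so no
   reallocation happens; with cMsrRanges = 31 and cNewRanges = 2 the table
   grows to RT_ALIGN_32(33, 16) = 48 entries. */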
/**
 * Inserts a new MSR range into a sorted MSR range array.
 *
 * If the new MSR range overlaps existing ranges, the existing ones will be
 * adjusted/removed to fit in the new one.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS
 * @retval  VERR_NO_MEMORY
 *
 * @param   pVM             The cross context VM structure.  If NULL,
 *                          use the process heap, otherwise the VM's hyper heap.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 *                          Must be NULL if using the hyper heap.
 * @param   pcMsrRanges     The variable holding the number of ranges.  Must be
 *                          NULL if using the hyper heap.
 * @param   pNewRange       The new range.
 */
int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange)
{
    Assert(pNewRange->uLast >= pNewRange->uFirst);
    Assert(pNewRange->enmRdFn > kCpumMsrRdFn_Invalid && pNewRange->enmRdFn < kCpumMsrRdFn_End);
    Assert(pNewRange->enmWrFn > kCpumMsrWrFn_Invalid && pNewRange->enmWrFn < kCpumMsrWrFn_End);

    /*
     * Validate and use the VM's MSR ranges array if we are using the hyper heap.
     */
    if (pVM)
    {
        AssertReturn(!ppaMsrRanges, VERR_INVALID_PARAMETER);
        AssertReturn(!pcMsrRanges, VERR_INVALID_PARAMETER);

        ppaMsrRanges = &pVM->cpum.s.GuestInfo.paMsrRangesR3;
        pcMsrRanges  = &pVM->cpum.s.GuestInfo.cMsrRanges;
    }
    else
    {
        AssertReturn(ppaMsrRanges, VERR_INVALID_POINTER);
        AssertReturn(pcMsrRanges, VERR_INVALID_POINTER);
    }

    uint32_t        cMsrRanges  = *pcMsrRanges;
    PCPUMMSRRANGE   paMsrRanges = *ppaMsrRanges;

    /*
     * Optimize the linear insertion case where we add new entries at the end.
     */
    if (   cMsrRanges > 0
        && paMsrRanges[cMsrRanges - 1].uLast < pNewRange->uFirst)
    {
        paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
        if (!paMsrRanges)
            return VERR_NO_MEMORY;
        paMsrRanges[cMsrRanges] = *pNewRange;
        *pcMsrRanges += 1;
    }
    else
    {
        uint32_t i = cpumR3MsrRangesBinSearch(paMsrRanges, cMsrRanges, pNewRange->uFirst);
        Assert(i == cMsrRanges || pNewRange->uFirst <= paMsrRanges[i].uLast);
        Assert(i == 0 || pNewRange->uFirst > paMsrRanges[i - 1].uLast);

        /*
         * Adding an entirely new entry?
         */
        if (   i >= cMsrRanges
            || pNewRange->uLast < paMsrRanges[i].uFirst)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
        /*
         * Replace existing entry?
         */
        else if (   pNewRange->uFirst == paMsrRanges[i].uFirst
                 && pNewRange->uLast  == paMsrRanges[i].uLast)
            paMsrRanges[i] = *pNewRange;
        /*
         * Splitting an existing entry?
         */
        else if (   pNewRange->uFirst > paMsrRanges[i].uFirst
                 && pNewRange->uLast  < paMsrRanges[i].uLast)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 2);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 2], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i + 1] = *pNewRange;
            paMsrRanges[i + 2] = paMsrRanges[i];
            paMsrRanges[i    ].uLast  = pNewRange->uFirst - 1;
            paMsrRanges[i + 2].uFirst = pNewRange->uLast  + 1;
            *pcMsrRanges += 2;
        }
        /*
         * Complicated scenarios that can affect more than one range.
         *
         * The current code does not optimize memmove calls when replacing
         * one or more existing ranges, because it's tedious to deal with and
         * not expected to be a frequent usage scenario.
         */
        else
        {
            /* Adjust start of first match? */
            if (   pNewRange->uFirst <= paMsrRanges[i].uFirst
                && pNewRange->uLast  <  paMsrRanges[i].uLast)
                paMsrRanges[i].uFirst = pNewRange->uLast + 1;
            else
            {
                /* Adjust end of first match? */
                if (pNewRange->uFirst > paMsrRanges[i].uFirst)
                {
                    Assert(paMsrRanges[i].uLast >= pNewRange->uFirst);
                    paMsrRanges[i].uLast = pNewRange->uFirst - 1;
                    i++;
                }
                /* Replace the whole first match (lazy bird). */
                else
                {
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }

                /* Does the new range affect more ranges? */
                while (   i < cMsrRanges
                       && pNewRange->uLast >= paMsrRanges[i].uFirst)
                {
                    if (pNewRange->uLast < paMsrRanges[i].uLast)
                    {
                        /* Adjust the start of it, then we're done. */
                        paMsrRanges[i].uFirst = pNewRange->uLast + 1;
                        break;
                    }

                    /* Remove it entirely. */
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }
            }

            /* Now, perform a normal insertion. */
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
    }

    return VINF_SUCCESS;
}


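/* Illustrative note (not from the original sources): the splitting branch
   above means that inserting { 0x20..0x2f } into a table holding the single
   range { 0x10..0x3f } yields three ranges:

        { 0x10..0x1f }   (original, end adjusted)
        { 0x20..0x2f }   (the new range)
        { 0x30..0x3f }   (copy of the original, start adjusted)
 */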
/**
 * Worker for cpumR3MsrApplyFudge that applies one table.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   paRanges    Array of MSRs to fudge.
 * @param   cRanges     Number of MSRs in the array.
 */
static int cpumR3MsrApplyFudgeTable(PVM pVM, PCCPUMMSRRANGE paRanges, size_t cRanges)
{
    for (uint32_t i = 0; i < cRanges; i++)
        if (!cpumLookupMsrRange(pVM, paRanges[i].uFirst))
        {
            LogRel(("CPUM: MSR fudge: %#010x %s\n", paRanges[i].uFirst, paRanges[i].szName));
            int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
                                           &paRanges[i]);
            if (RT_FAILURE(rc))
                return rc;
        }
    return VINF_SUCCESS;
}


/**
 * Fudges the MSRs that guests are known to access in some odd cases.
 *
 * A typical example is a VM that has been moved between different hosts where
 * for instance the cpu vendor differs.
 *
 * Another example is older CPU profiles (e.g. Atom Bonnell) for newer CPUs (e.g.
 * Atom Silvermont), where features reported through CPUID aren't present in the
 * MSRs (e.g. AMD64_TSC_AUX).
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int cpumR3MsrApplyFudge(PVM pVM)
{
    /*
     * Basic.
     */
    static CPUMMSRRANGE const s_aFudgeMsrs[] =
    {
        MFO(0x00000000, "IA32_P5_MC_ADDR",          Ia32P5McAddr),
        MFX(0x00000001, "IA32_P5_MC_TYPE",          Ia32P5McType,   Ia32P5McType,   0, 0, UINT64_MAX),
        MVO(0x00000017, "IA32_PLATFORM_ID",         0),
        MFN(0x0000001b, "IA32_APIC_BASE",           Ia32ApicBase,   Ia32ApicBase),
        MVI(0x0000008b, "BIOS_SIGN",                0),
        MFX(0x000000fe, "IA32_MTRRCAP",             Ia32MtrrCap,    ReadOnly,       0x508, 0, 0),
        MFX(0x00000179, "IA32_MCG_CAP",             Ia32McgCap,     ReadOnly,       0x005, 0, 0),
        MFX(0x0000017a, "IA32_MCG_STATUS",          Ia32McgStatus,  Ia32McgStatus,  0, ~(uint64_t)UINT32_MAX, 0),
        MFN(0x000001a0, "IA32_MISC_ENABLE",         Ia32MiscEnable, Ia32MiscEnable),
        MFN(0x000001d9, "IA32_DEBUGCTL",            Ia32DebugCtl,   Ia32DebugCtl),
        MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP",   P6LastBranchFromIp),
        MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP",     P6LastBranchToIp),
        MFO(0x000001dd, "P6_LAST_INT_FROM_IP",      P6LastIntFromIp),
        MFO(0x000001de, "P6_LAST_INT_TO_IP",        P6LastIntToIp),
        MFS(0x00000277, "IA32_PAT",                 Ia32Pat, Ia32Pat, Guest.msrPAT),
        MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE",       Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, ~(uint64_t)0xc07),
        MFN(0x00000400, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
    };
    int rc = cpumR3MsrApplyFudgeTable(pVM, &s_aFudgeMsrs[0], RT_ELEMENTS(s_aFudgeMsrs));
    AssertLogRelRCReturn(rc, rc);

    /*
     * XP might mistake opterons and other newer CPUs for P4s.
     */
    if (pVM->cpum.s.GuestFeatures.uFamily >= 0xf)
    {
        static CPUMMSRRANGE const s_aP4FudgeMsrs[] =
        {
            MFX(0x0000002c, "P4_EBC_FREQUENCY_ID", IntelP4EbcFrequencyId, IntelP4EbcFrequencyId, 0xf12010f, UINT64_MAX, 0),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aP4FudgeMsrs[0], RT_ELEMENTS(s_aP4FudgeMsrs));
        AssertLogRelRCReturn(rc, rc);
    }

    if (pVM->cpum.s.GuestFeatures.fRdTscP)
    {
        static CPUMMSRRANGE const s_aRdTscPFudgeMsrs[] =
        {
            MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aRdTscPFudgeMsrs[0], RT_ELEMENTS(s_aRdTscPFudgeMsrs));
        AssertLogRelRCReturn(rc, rc);
    }

    return rc;
}


/**
 * Do we consider @a enmConsider a better match for @a enmTarget than
 * @a enmFound?
 *
 * Only called when @a enmConsider isn't exactly what we're looking for.
 *
 * @returns true/false.
 * @param   enmConsider     The new microarch to consider.
 * @param   enmTarget       The target microarch.
 * @param   enmFound        The best microarch match we've found thus far.
 */
DECLINLINE(bool) cpumR3DbIsBetterMarchMatch(CPUMMICROARCH enmConsider, CPUMMICROARCH enmTarget, CPUMMICROARCH enmFound)
{
    Assert(enmConsider != enmTarget);

    /*
     * If we've already got an exact march match, don't bother with enmConsider.
     */
    if (enmFound == enmTarget)
        return false;

    /*
     * Found is below: Pick 'consider' if it's closer to the target or above it.
     */
    if (enmFound < enmTarget)
        return enmConsider > enmFound;

    /*
     * Found is above: Pick 'consider' if it's also above (paranoia: or equal)
     * but closer to the target.
     */
    return enmConsider >= enmTarget && enmConsider < enmFound;
}


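/* Illustrative note (not from the original sources): this relies on the
   CPUMMICROARCH enum values being ordered so that plain integer comparison is
   a usable notion of "closeness".  E.g. with enmTarget = Skylake and
   enmFound = SandyBridge, a Haswell candidate is picked because it lies
   between the two; when both candidates sit above the target, the lower
   (closer) one is kept. */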
/**
 * Do we consider @a enmConsider a better match for @a enmTarget than
 * @a enmFound?
 *
 * Only called for intel family 06h CPUs.
 *
 * @returns true/false.
 * @param   enmConsider     The new microarch to consider.
 * @param   enmTarget       The target microarch.
 * @param   enmFound        The best microarch match we've found thus far.
 */
static bool cpumR3DbIsBetterIntelFam06Match(CPUMMICROARCH enmConsider, CPUMMICROARCH enmTarget, CPUMMICROARCH enmFound)
{
    /* Check intel family 06h claims. */
    AssertReturn(enmConsider >= kCpumMicroarch_Intel_P6_Core_Atom_First && enmConsider <= kCpumMicroarch_Intel_P6_Core_Atom_End,
                 false);
    AssertReturn(enmTarget >= kCpumMicroarch_Intel_P6_Core_Atom_First && enmTarget <= kCpumMicroarch_Intel_P6_Core_Atom_End,
                 false);

    /* Put matches out of the way. */
    if (enmConsider == enmTarget)
        return true;
    if (enmFound == enmTarget)
        return false;

    /* If found isn't a family 06h march, whatever we're considering must be a better choice. */
    if (   enmFound < kCpumMicroarch_Intel_P6_Core_Atom_First
        || enmFound > kCpumMicroarch_Intel_P6_Core_Atom_End)
        return true;

    /*
     * The family 06h stuff is split into three categories:
     *      - Common P6 heritage
     *      - Core
     *      - Atom
     *
     * Determine which of the three arguments are Atom marchs, because that's
     * all we need to make the right choice.
     */
    bool const fConsiderAtom = enmConsider >= kCpumMicroarch_Intel_Atom_First;
    bool const fTargetAtom   = enmTarget   >= kCpumMicroarch_Intel_Atom_First;
    bool const fFoundAtom    = enmFound    >= kCpumMicroarch_Intel_Atom_First;

    /*
     * Want atom:
     */
    if (fTargetAtom)
    {
        /* Pick the atom if we've got one of each. */
        if (fConsiderAtom != fFoundAtom)
            return fConsiderAtom;
        /* If we haven't got any atoms under consideration, pick a P6 or the earlier core.
           Note! Not entirely sure Dothan is the best choice, but it'll do for now. */
        if (!fConsiderAtom)
        {
            if (enmConsider > enmFound)
                return enmConsider <= kCpumMicroarch_Intel_P6_M_Dothan;
            return enmFound > kCpumMicroarch_Intel_P6_M_Dothan;
        }
        /* else: same category, default comparison rules. */
        Assert(fConsiderAtom && fFoundAtom);
    }
    /*
     * Want non-atom:
     */
    /* Pick the non-atom if we've got one of each. */
    else if (fConsiderAtom != fFoundAtom)
        return fFoundAtom;
    /* If we've only got atoms under consideration, pick the older one just to pick something. */
    else if (fConsiderAtom)
        return enmConsider < enmFound;
    else
        Assert(!fConsiderAtom && !fFoundAtom);

    /*
     * Same basic category.  Do same compare as caller.
     */
    return cpumR3DbIsBetterMarchMatch(enmConsider, enmTarget, enmFound);
}


int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo)
{
    CPUMDBENTRY const *pEntry = NULL;
    int                rc;

    if (!strcmp(pszName, "host"))
    {
        /*
         * Create a CPU database entry for the host CPU.  This means getting
         * the CPUID bits from the real CPU and grabbing the closest matching
         * database entry for MSRs.
         */
        rc = CPUMR3CpuIdDetectUnknownLeafMethod(&pInfo->enmUnknownCpuIdMethod, &pInfo->DefCpuId);
        if (RT_FAILURE(rc))
            return rc;
        rc = CPUMR3CpuIdCollectLeaves(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
        if (RT_FAILURE(rc))
            return rc;
        pInfo->fMxCsrMask = CPUMR3DeterminHostMxCsrMask();

        /* Lookup database entry for MSRs. */
        CPUMCPUVENDOR const enmVendor    = CPUMR3CpuIdDetectVendorEx(pInfo->paCpuIdLeavesR3[0].uEax,
                                                                     pInfo->paCpuIdLeavesR3[0].uEbx,
                                                                     pInfo->paCpuIdLeavesR3[0].uEcx,
                                                                     pInfo->paCpuIdLeavesR3[0].uEdx);
        uint32_t      const uStd1Eax     = pInfo->paCpuIdLeavesR3[1].uEax;
        uint8_t       const uFamily      = ASMGetCpuFamily(uStd1Eax);
        uint8_t       const uModel       = ASMGetCpuModel(uStd1Eax, enmVendor == CPUMCPUVENDOR_INTEL);
        uint8_t       const uStepping    = ASMGetCpuStepping(uStd1Eax);
        CPUMMICROARCH const enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx(enmVendor, uFamily, uModel, uStepping);

        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
        {
            CPUMDBENTRY const *pCur = g_apCpumDbEntries[i];
            if ((CPUMCPUVENDOR)pCur->enmVendor == enmVendor)
            {
                /* Match against family, microarch, model and stepping.  Except
                   for family, always match the closest, with preference given to
                   the later/older ones. */
                if (pCur->uFamily == uFamily)
                {
                    if (pCur->enmMicroarch == enmMicroarch)
                    {
                        if (pCur->uModel == uModel)
                        {
                            if (pCur->uStepping == uStepping)
                            {
                                /* Perfect match. */
                                pEntry = pCur;
                                break;
                            }

                            if (   !pEntry
                                || pEntry->uModel       != uModel
                                || pEntry->enmMicroarch != enmMicroarch
                                || pEntry->uFamily      != uFamily)
                                pEntry = pCur;
                            else if (  pCur->uStepping >= uStepping
                                     ? pCur->uStepping < pEntry->uStepping || pEntry->uStepping < uStepping
                                     : pCur->uStepping > pEntry->uStepping)
                                pEntry = pCur;
                        }
                        else if (   !pEntry
                                 || pEntry->enmMicroarch != enmMicroarch
                                 || pEntry->uFamily      != uFamily)
                            pEntry = pCur;
                        else if (  pCur->uModel >= uModel
                                 ? pCur->uModel < pEntry->uModel || pEntry->uModel < uModel
                                 : pCur->uModel > pEntry->uModel)
                            pEntry = pCur;
                    }
                    else if (   !pEntry
                             || pEntry->uFamily != uFamily)
                        pEntry = pCur;
                    /* Special march matching rules apply to intel family 06h. */
                    else if (     enmVendor == CPUMCPUVENDOR_INTEL
                               && uFamily   == 6
                             ? cpumR3DbIsBetterIntelFam06Match(pCur->enmMicroarch, enmMicroarch, pEntry->enmMicroarch)
                             : cpumR3DbIsBetterMarchMatch(pCur->enmMicroarch, enmMicroarch, pEntry->enmMicroarch))
                        pEntry = pCur;
                }
                /* We don't do closeness matching on family, we use the first
                   entry for the CPU vendor instead. (P4 workaround.) */
                else if (!pEntry)
                    pEntry = pCur;
            }
        }

        if (pEntry)
            LogRel(("CPUM: Matched host CPU %s %#x/%#x/%#x %s with CPU DB entry '%s' (%s %#x/%#x/%#x %s)\n",
                    CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
                    pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor), pEntry->uFamily, pEntry->uModel,
                    pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
        else
        {
            pEntry = g_apCpumDbEntries[0];
            LogRel(("CPUM: No matching processor database entry %s %#x/%#x/%#x %s, falling back on '%s'\n",
                    CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
                    pEntry->pszName));
        }
    }
    else
    {
        /*
         * We're supposed to be emulating a specific CPU that is included in
         * our CPU database.  The CPUID tables need to be copied onto the
         * heap so the caller can modify them and so they can be freed like
         * in the host case above.
         */
        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
            if (!strcmp(pszName, g_apCpumDbEntries[i]->pszName))
            {
                pEntry = g_apCpumDbEntries[i];
                break;
            }
        if (!pEntry)
        {
            LogRel(("CPUM: Cannot locate any CPU by the name '%s'\n", pszName));
            return VERR_CPUM_DB_CPU_NOT_FOUND;
        }

        pInfo->cCpuIdLeaves = pEntry->cCpuIdLeaves;
        if (pEntry->cCpuIdLeaves)
        {
            /* Must allocate a multiple of 16 here, matching cpumR3CpuIdEnsureSpace. */
            size_t cbExtra = sizeof(pEntry->paCpuIdLeaves[0]) * (RT_ALIGN(pEntry->cCpuIdLeaves, 16) - pEntry->cCpuIdLeaves);
            pInfo->paCpuIdLeavesR3 = (PCPUMCPUIDLEAF)RTMemDupEx(pEntry->paCpuIdLeaves,
                                                                sizeof(pEntry->paCpuIdLeaves[0]) * pEntry->cCpuIdLeaves,
                                                                cbExtra);
            if (!pInfo->paCpuIdLeavesR3)
                return VERR_NO_MEMORY;
        }
        else
            pInfo->paCpuIdLeavesR3 = NULL;

        pInfo->enmUnknownCpuIdMethod = pEntry->enmUnknownCpuId;
        pInfo->DefCpuId              = pEntry->DefUnknownCpuId;
        pInfo->fMxCsrMask            = pEntry->fMxCsrMask;

        LogRel(("CPUM: Using CPU DB entry '%s' (%s %#x/%#x/%#x %s)\n",
                pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor),
                pEntry->uFamily, pEntry->uModel, pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
    }

    pInfo->fMsrMask             = pEntry->fMsrMask;
    pInfo->iFirstExtCpuIdLeaf   = 0; /* Set by caller. */
    pInfo->uScalableBusFreq     = pEntry->uScalableBusFreq;
    pInfo->paCpuIdLeavesR0      = NIL_RTR0PTR;
    pInfo->paMsrRangesR0        = NIL_RTR0PTR;
    pInfo->paCpuIdLeavesRC      = NIL_RTRCPTR;
    pInfo->paMsrRangesRC        = NIL_RTRCPTR;

    /*
     * Copy the MSR range.
     */
    uint32_t        cMsrs   = 0;
    PCPUMMSRRANGE   paMsrs  = NULL;

    PCCPUMMSRRANGE  pCurMsr = pEntry->paMsrRanges;
    uint32_t        cLeft   = pEntry->cMsrRanges;
    while (cLeft-- > 0)
    {
        rc = cpumR3MsrRangesInsert(NULL /* pVM */, &paMsrs, &cMsrs, pCurMsr);
        if (RT_FAILURE(rc))
        {
            Assert(!paMsrs); /* The above function frees this. */
            RTMemFree(pInfo->paCpuIdLeavesR3);
            pInfo->paCpuIdLeavesR3 = NULL;
            return rc;
        }
        pCurMsr++;
    }

    pInfo->paMsrRangesR3 = paMsrs;
    pInfo->cMsrRanges    = cMsrs;
    return VINF_SUCCESS;
}


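/* Illustrative sketch (not part of the original sources): a hypothetical
   caller resolving a database entry by name.  The name strings come from the
   pszName fields of the g_apCpumDbEntries entries, and the CPUID and MSR
   tables returned in the CPUMINFO are process-heap copies owned by the
   caller:

        CPUMINFO Info;
        int rc = cpumR3DbGetCpuInfo("Intel Core i7-6700K", &Info);
        if (RT_SUCCESS(rc))
        {
            // ... use Info.paCpuIdLeavesR3 and Info.paMsrRangesR3 ...
            RTMemFree(Info.paCpuIdLeavesR3);
            RTMemFree(Info.paMsrRangesR3);
        }
 */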
/**
 * Insert an MSR range into the VM.
 *
 * If the new MSR range overlaps existing ranges, the existing ones will be
 * adjusted/removed to fit in the new one.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pNewRange   Pointer to the MSR range being inserted.
 */
VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange)
{
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pNewRange, VERR_INVALID_PARAMETER);

    return cpumR3MsrRangesInsert(pVM, NULL /* ppaMsrRanges */, NULL /* pcMsrRanges */, pNewRange);
}


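/* Illustrative sketch (not part of the original sources): ring-3 code could
   use the short macros above to build a range and hand it to the VM, e.g. a
   hypothetical synthetic MSR that reads as zero and ignores all writes (the
   MSR index and name below are made up for the example):

        static CPUMMSRRANGE const s_SyntheticMsr =
            MVI(0x40001234, "SYNTHETIC_TEST_MSR", 0);
        int rc = CPUMR3MsrRangesInsert(pVM, &s_SyntheticMsr);
        AssertRCReturn(rc, rc);
 */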
/**
 * Register statistics for the MSRs.
 *
 * This must not be called before the MSRs have been finalized and moved to the
 * hyper heap.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int cpumR3MsrRegStats(PVM pVM)
{
    /*
     * Global statistics.
     */
    PCPUM pCpum = &pVM->cpum.s;
    STAM_REL_REG(pVM, &pCpum->cMsrReads,                STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/Reads",
                 STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
    STAM_REL_REG(pVM, &pCpum->cMsrReadsRaiseGp,         STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/ReadsRaisingGP",
                 STAMUNIT_OCCURENCES, "RDMSR raising #GPs, except unknown MSRs.");
    STAM_REL_REG(pVM, &pCpum->cMsrReadsUnknown,         STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/ReadsUnknown",
                 STAMUNIT_OCCURENCES, "RDMSR on unknown MSRs (raises #GP).");
    STAM_REL_REG(pVM, &pCpum->cMsrWrites,               STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/Writes",
                 STAMUNIT_OCCURENCES, "All WRMSRs making it to CPUM.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesRaiseGp,        STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesRaisingGP",
                 STAMUNIT_OCCURENCES, "WRMSR raising #GPs, except unknown MSRs.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesToIgnoredBits,  STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesToIgnoredBits",
                 STAMUNIT_OCCURENCES, "Writing of ignored bits.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesUnknown,        STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesUnknown",
                 STAMUNIT_OCCURENCES, "WRMSR on unknown MSRs (raises #GP).");


# ifdef VBOX_WITH_STATISTICS
    /*
     * Per range.
     */
    PCPUMMSRRANGE   paRanges = pVM->cpum.s.GuestInfo.paMsrRangesR3;
    uint32_t        cRanges  = pVM->cpum.s.GuestInfo.cMsrRanges;
    for (uint32_t i = 0; i < cRanges; i++)
    {
        char    szName[160];
        ssize_t cchName;

        if (paRanges[i].uFirst == paRanges[i].uLast)
            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%s",
                                  paRanges[i].uFirst, paRanges[i].szName);
        else
            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%#010x-%s",
                                  paRanges[i].uFirst, paRanges[i].uLast, paRanges[i].szName);

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-reads");
        STAMR3Register(pVM, &paRanges[i].cReads, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RDMSR");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-writes");
        STAMR3Register(pVM, &paRanges[i].cWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-GPs");
        STAMR3Register(pVM, &paRanges[i].cGps, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "#GPs");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-ign-bits-writes");
        STAMR3Register(pVM, &paRanges[i].cIgnoredBits, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR w/ ignored bits");
    }
# endif /* VBOX_WITH_STATISTICS */

    return VINF_SUCCESS;
}

#endif /* !CPUM_DB_STANDALONE */
