VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp@51334

Last change on this file since 51334 was 51334, checked in by vboxsync, 11 years ago

VMM/CPUM: Fix NULL ptr deref. due to premature access.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 34.7 KB
/* $Id: CPUMR3Db.cpp 51334 2014-05-22 06:06:18Z vboxsync $ */
/** @file
 * CPUM - CPU database part.
 */

/*
 * Copyright (C) 2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/mm.h>

#include <VBox/err.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/mem.h>
#include <iprt/string.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
typedef struct CPUMDBENTRY
{
    /** The CPU name. */
    const char      *pszName;
    /** The full CPU name. */
    const char      *pszFullName;
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t         enmVendor;
    /** The CPU family. */
    uint8_t         uFamily;
    /** The CPU model. */
    uint8_t         uModel;
    /** The CPU stepping. */
    uint8_t         uStepping;
    /** The microarchitecture. */
    CPUMMICROARCH   enmMicroarch;
    /** Scalable bus frequency used for reporting other frequencies. */
    uint64_t        uScalableBusFreq;
    /** Flags (TBD). */
    uint32_t        fFlags;
    /** The maximum physical address width of the CPU.  This should correspond
     * to the value in CPUID leaf 0x80000008 when present. */
    uint8_t         cMaxPhysAddrWidth;
    /** Pointer to an array of CPUID leaves. */
    PCCPUMCPUIDLEAF paCpuIdLeaves;
    /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
    uint32_t        cCpuIdLeaves;
    /** The method used to deal with unknown CPUID leaves. */
    CPUMUKNOWNCPUID enmUnknownCpuId;
    /** The default unknown CPUID value. */
    CPUMCPUID       DefUnknownCpuId;

    /** MSR mask.  Several microarchitectures ignore the higher bits of the
     * MSR index. */
    uint32_t        fMsrMask;

    /** The number of ranges in the table pointed to by paMsrRanges. */
    uint32_t        cMsrRanges;
    /** MSR ranges for this CPU. */
    PCCPUMMSRRANGE  paMsrRanges;
} CPUMDBENTRY;

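/* Illustrative sketch, not part of the build: roughly what one of the
 * cpus/*.h database headers included further down instantiates for the
 * structure above.  All values here are made up for the example, and the
 * CPUMUKNOWNCPUID_* constant name is an assumption. */
#if 0
static CPUMDBENTRY const g_Entry_Example_CPU =
{
    /*.pszName           = */ "Example CPU",
    /*.pszFullName       = */ "Example(R) CPU @ 3.00GHz",
    /*.enmVendor         = */ CPUMCPUVENDOR_INTEL,
    /*.uFamily           = */ 6,
    /*.uModel            = */ 42,
    /*.uStepping         = */ 7,
    /*.enmMicroarch      = */ kCpumMicroarch_Intel_Core7_SandyBridge,
    /*.uScalableBusFreq  = */ UINT64_C(100000000),
    /*.fFlags            = */ 0,
    /*.cMaxPhysAddrWidth = */ 36,
    /*.paCpuIdLeaves     = */ NULL,
    /*.cCpuIdLeaves      = */ 0,
    /*.enmUnknownCpuId   = */ CPUMUKNOWNCPUID_LAST_STD_LEAF,
    /*.DefUnknownCpuId   = */ { 0, 0, 0, 0 },
    /*.fMsrMask          = */ UINT32_MAX,
    /*.cMsrRanges        = */ 0,
    /*.paMsrRanges       = */ NULL,
};
#endif
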

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/

/** @def NULL_ALONE
 * For eliminating an unnecessary data dependency in standalone builds (for
 * VBoxSVC). */
/** @def ZERO_ALONE
 * For eliminating an unnecessary data size dependency in standalone builds
 * (for VBoxSVC). */
#ifndef CPUM_DB_STANDALONE
# define NULL_ALONE(a_aTable)   a_aTable
# define ZERO_ALONE(a_cTable)   a_cTable
#else
# define NULL_ALONE(a_aTable)   NULL
# define ZERO_ALONE(a_cTable)   0
#endif


/** @name Short macros for the MSR range entries.
 *
 * These are rather cryptic, but this is to reduce the attack on the right
 * margin.
 *
 * @{ */
/** Alias one MSR onto another (a_uTarget). */
#define MAL(a_uMsr, a_szName, a_uTarget) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_MsrAlias, kCpumMsrWrFn_MsrAlias, 0, a_uTarget, 0, 0, a_szName)
/** Functions handle everything. */
#define MFN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Functions handle everything, with GP mask. */
#define MFG(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, a_fWrGpMask, a_szName)
/** Function handlers, read-only. */
#define MFO(a_uMsr, a_szName, a_enmRdFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_ReadOnly, 0, 0, 0, UINT64_MAX, a_szName)
/** Function handlers, ignore all writes. */
#define MFI(a_uMsr, a_szName, a_enmRdFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_IgnoreWrite, 0, 0, UINT64_MAX, 0, a_szName)
/** Function handlers, with value. */
#define MFV(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, 0, 0, a_szName)
/** Function handlers, with write ignore mask. */
#define MFW(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrIgnMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, a_fWrIgnMask, 0, a_szName)
/** Function handlers, extended version. */
#define MFX(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Function handlers, with CPUMCPU storage variable. */
#define MFS(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, 0, 0, a_szName)
/** Function handlers, with CPUMCPU storage variable, ignore mask and GP mask. */
#define MFZ(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Read-only fixed value. */
#define MVO(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Read-only fixed value, ignores all writes. */
#define MVI(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Read fixed value, ignore writes outside GP mask. */
#define MVG(a_uMsr, a_szName, a_uValue, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, 0, a_fWrGpMask, a_szName)
/** Read fixed value, extended version with both GP and ignore masks. */
#define MVX(a_uMsr, a_szName, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** The short form, no CPUM backing. */
#define MSN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)

/** Range: Functions handle everything. */
#define RFN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Range: Read fixed value, read-only. */
#define RVO(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Range: Read fixed value, ignore writes. */
#define RVI(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Range: The short form, no CPUM backing. */
#define RSN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)

/** Internal form used by the macros. */
#ifdef VBOX_WITH_STATISTICS
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName, \
      { 0 }, { 0 }, { 0 }, { 0 } }
#else
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName }
#endif
/** @} */

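/* For illustration: with VBOX_WITH_STATISTICS undefined, an entry such as
 *      MVI(0x0000008b, "BIOS_SIGN", 0)
 * expands through RINT into a CPUMMSRRANGE initializer along the lines of
 *      { 0x8b, 0x8b, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite,
 *        0, 0, 0, UINT64_MAX, 0, "BIOS_SIGN" }
 * i.e. a single-MSR range whose reads return the fixed value 0 and whose
 * all-ones write-ignore mask turns every write into a silent no-op. */
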

#include "cpus/Intel_Core_i7_3960X.h"
#include "cpus/Intel_Core_i5_3570.h"
#include "cpus/Intel_Core_i7_2635QM.h"
#include "cpus/Intel_Xeon_X5482_3_20GHz.h"
#include "cpus/Intel_Pentium_M_processor_2_00GHz.h"
#include "cpus/Intel_Pentium_4_3_00GHz.h"

#include "cpus/AMD_FX_8150_Eight_Core.h"
#include "cpus/AMD_Phenom_II_X6_1100T.h"
#include "cpus/Quad_Core_AMD_Opteron_2384.h"
#include "cpus/AMD_Athlon_64_X2_Dual_Core_4200.h"
#include "cpus/AMD_Athlon_64_3200.h"

#include "cpus/VIA_QuadCore_L4700_1_2_GHz.h"



/**
 * The database entries.
 *
 * 1. The first entry is special.  It is the fallback for unknown
 *    processors.  Thus, it had better be pretty representative.
 *
 * 2. The first entry for a CPU vendor is likewise important as it is
 *    the default entry for that vendor.
 *
 * Generally we put the most recent CPUs first, since these tend to have the
 * most complicated and backwards compatible list of MSRs.
 */
static CPUMDBENTRY const * const g_apCpumDbEntries[] =
{
#ifdef VBOX_CPUDB_Intel_Core_i5_3570
    &g_Entry_Intel_Core_i5_3570,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i7_3960X
    &g_Entry_Intel_Core_i7_3960X,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i7_2635QM
    &g_Entry_Intel_Core_i7_2635QM,
#endif
#ifdef VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz
    &g_Entry_Intel_Pentium_M_processor_2_00GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz
    &g_Entry_Intel_Xeon_X5482_3_20GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Pentium_4_3_00GHz
    &g_Entry_Intel_Pentium_4_3_00GHz,
#endif

#ifdef VBOX_CPUDB_AMD_FX_8150_Eight_Core
    &g_Entry_AMD_FX_8150_Eight_Core,
#endif
#ifdef VBOX_CPUDB_AMD_Phenom_II_X6_1100T
    &g_Entry_AMD_Phenom_II_X6_1100T,
#endif
#ifdef VBOX_CPUDB_Quad_Core_AMD_Opteron_2384
    &g_Entry_Quad_Core_AMD_Opteron_2384,
#endif
#ifdef VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200
    &g_Entry_AMD_Athlon_64_X2_Dual_Core_4200,
#endif
#ifdef VBOX_CPUDB_AMD_Athlon_64_3200
    &g_Entry_AMD_Athlon_64_3200,
#endif

#ifdef VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz
    &g_Entry_VIA_QuadCore_L4700_1_2_GHz,
#endif
};


#ifndef CPUM_DB_STANDALONE

/**
 * Binary search used by cpumR3MsrRangesInsert; it has some special properties
 * with regard to mismatches.
 *
 * @returns Insert location.
 * @param   paMsrRanges     The MSR ranges to search.
 * @param   cMsrRanges      The number of MSR ranges.
 * @param   uMsr            What to search for.
 */
static uint32_t cpumR3MsrRangesBinSearch(PCCPUMMSRRANGE paMsrRanges, uint32_t cMsrRanges, uint32_t uMsr)
{
    if (!cMsrRanges)
        return 0;

    uint32_t iStart = 0;
    uint32_t iLast  = cMsrRanges - 1;
    for (;;)
    {
        uint32_t i = iStart + (iLast - iStart + 1) / 2;
        if (   uMsr >= paMsrRanges[i].uFirst
            && uMsr <= paMsrRanges[i].uLast)
            return i;
        if (uMsr < paMsrRanges[i].uFirst)
        {
            if (i <= iStart)
                return i;
            iLast = i - 1;
        }
        else
        {
            if (i >= iLast)
            {
                if (i < cMsrRanges)
                    i++;
                return i;
            }
            iStart = i + 1;
        }
    }
}

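/* Worked example for the search above, assuming two sorted ranges
 * {0x10..0x1f} at index 0 and {0x40..0x4f} at index 1:  uMsr = 0x45 lands
 * inside the second range and returns 1; uMsr = 0x30 matches nothing and
 * returns the insert position 1 (between the two); uMsr = 0x60 lies past
 * the last range and returns 2, i.e. the end of the array. */
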
293
294/**
295 * Ensures that there is space for at least @a cNewRanges in the table,
296 * reallocating the table if necessary.
297 *
298 * @returns Pointer to the MSR ranges on success, NULL on failure. On failure
299 * @a *ppaMsrRanges is freed and set to NULL.
300 * @param pVM Pointer to the VM, used as the heap selector.
301 * Passing NULL uses the host-context heap, otherwise
302 * the VM's hyper heap is used.
303 * @param ppaMsrRanges The variable pointing to the ranges (input/output).
304 * @param cMsrRanges The current number of ranges.
305 * @param cNewRanges The number of ranges to be added.
306 */
307static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
308{
309 uint32_t cMsrRangesAllocated = RT_ALIGN_32(cMsrRanges, 16);
310 if (cMsrRangesAllocated < cMsrRanges + cNewRanges)
311 {
312 void *pvNew;
313 uint32_t cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
314 if (pVM)
315 {
316 Assert(ppaMsrRanges == &pVM->cpum.s.GuestInfo.paMsrRangesR3);
317 Assert(cMsrRanges == pVM->cpum.s.GuestInfo.cMsrRanges);
318
319 size_t cb = cMsrRangesAllocated * sizeof(**ppaMsrRanges);
320 size_t cbNew = cNew * sizeof(**ppaMsrRanges);
321 int rc = MMR3HyperRealloc(pVM, *ppaMsrRanges, cb, 32, MM_TAG_CPUM_MSRS, cbNew, &pvNew);
322 if (RT_FAILURE(rc))
323 {
324 *ppaMsrRanges = NULL;
325 pVM->cpum.s.GuestInfo.paMsrRangesR0 = NIL_RTR0PTR;
326 pVM->cpum.s.GuestInfo.paMsrRangesRC = NIL_RTRCPTR;
327 return NULL;
328 }
329
330 pVM->cpum.s.GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, *ppaMsrRanges);
331 pVM->cpum.s.GuestInfo.paMsrRangesR0 = MMHyperR3ToRC(pVM, *ppaMsrRanges);
332 /** @todo Update R0 and RC pointers here? */
333 }
334 else
335 {
336 pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
337 if (!pvNew)
338 {
339 RTMemFree(*ppaMsrRanges);
340 *ppaMsrRanges = NULL;
341 return NULL;
342 }
343 }
344 *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
345 }
346 return *ppaMsrRanges;
347}
348
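/* Example of the growth policy above: a table currently holding 16 ranges is
 * assumed to have RT_ALIGN_32(16, 16) = 16 slots allocated, so inserting one
 * more range reallocates to RT_ALIGN_32(17, 16) = 32 slots.  The table thus
 * grows in steps of 16 entries and most insertions need no reallocation. */
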

/**
 * Inserts a new MSR range into a sorted MSR range array.
 *
 * If the new MSR range overlaps existing ranges, the existing ones will be
 * adjusted/removed to fit in the new one.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS
 * @retval  VERR_NO_MEMORY
 *
 * @param   pVM             Pointer to the VM, used as the heap selector.
 *                          Passing NULL uses the host-context heap, otherwise
 *                          the hyper heap.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 *                          Must be NULL if using the hyper heap.
 * @param   pcMsrRanges     The variable holding the number of ranges.  Must be
 *                          NULL if using the hyper heap.
 * @param   pNewRange       The new range.
 */
int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange)
{
    Assert(pNewRange->uLast >= pNewRange->uFirst);
    Assert(pNewRange->enmRdFn > kCpumMsrRdFn_Invalid && pNewRange->enmRdFn < kCpumMsrRdFn_End);
    Assert(pNewRange->enmWrFn > kCpumMsrWrFn_Invalid && pNewRange->enmWrFn < kCpumMsrWrFn_End);

    /*
     * Validate and use the VM's MSR ranges array if we are using the hyper heap.
     */
    if (pVM)
    {
        AssertReturn(!ppaMsrRanges, VERR_INVALID_PARAMETER);
        AssertReturn(!pcMsrRanges, VERR_INVALID_PARAMETER);

        ppaMsrRanges = &pVM->cpum.s.GuestInfo.paMsrRangesR3;
        pcMsrRanges  = &pVM->cpum.s.GuestInfo.cMsrRanges;
    }

    uint32_t      cMsrRanges  = *pcMsrRanges;
    PCPUMMSRRANGE paMsrRanges = *ppaMsrRanges;

    /*
     * Optimize the linear insertion case where we add new entries at the end.
     */
    if (   cMsrRanges > 0
        && paMsrRanges[cMsrRanges - 1].uLast < pNewRange->uFirst)
    {
        paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
        if (!paMsrRanges)
            return VERR_NO_MEMORY;
        paMsrRanges[cMsrRanges] = *pNewRange;
        *pcMsrRanges += 1;
    }
    else
    {
        uint32_t i = cpumR3MsrRangesBinSearch(paMsrRanges, cMsrRanges, pNewRange->uFirst);
        Assert(i == cMsrRanges || pNewRange->uFirst <= paMsrRanges[i].uLast);
        Assert(i == 0 || pNewRange->uFirst > paMsrRanges[i - 1].uLast);

        /*
         * Adding an entirely new entry?
         */
        if (   i >= cMsrRanges
            || pNewRange->uLast < paMsrRanges[i].uFirst)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
        /*
         * Replace existing entry?
         */
        else if (   pNewRange->uFirst == paMsrRanges[i].uFirst
                 && pNewRange->uLast  == paMsrRanges[i].uLast)
            paMsrRanges[i] = *pNewRange;
        /*
         * Splitting an existing entry?
         */
        else if (   pNewRange->uFirst > paMsrRanges[i].uFirst
                 && pNewRange->uLast  < paMsrRanges[i].uLast)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 2);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 2], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i + 1] = *pNewRange;
            paMsrRanges[i + 2] = paMsrRanges[i];
            paMsrRanges[i    ].uLast  = pNewRange->uFirst - 1;
            paMsrRanges[i + 2].uFirst = pNewRange->uLast  + 1;
            *pcMsrRanges += 2;
        }
        /*
         * Complicated scenarios that can affect more than one range.
         *
         * The current code does not optimize memmove calls when replacing
         * one or more existing ranges, because it's tedious to deal with and
         * not expected to be a frequent usage scenario.
         */
        else
        {
            /* Adjust the start of the first match? */
            if (   pNewRange->uFirst <= paMsrRanges[i].uFirst
                && pNewRange->uLast  <  paMsrRanges[i].uLast)
                paMsrRanges[i].uFirst = pNewRange->uLast + 1;
            else
            {
                /* Adjust the end of the first match? */
                if (pNewRange->uFirst > paMsrRanges[i].uFirst)
                {
                    Assert(paMsrRanges[i].uLast >= pNewRange->uFirst);
                    paMsrRanges[i].uLast = pNewRange->uFirst - 1;
                    i++;
                }
                /* Replace the whole first match (lazy bird). */
                else
                {
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }

                /* Does the new range affect more ranges? */
                while (   i < cMsrRanges
                       && pNewRange->uLast >= paMsrRanges[i].uFirst)
                {
                    if (pNewRange->uLast < paMsrRanges[i].uLast)
                    {
                        /* Adjust the start of it, then we're done. */
                        paMsrRanges[i].uFirst = pNewRange->uLast + 1;
                        break;
                    }

                    /* Remove it entirely. */
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }
            }

            /* Now, perform a normal insertion. */
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
    }

    return VINF_SUCCESS;
}

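#if 0 /* Illustrative sketch, not part of the build: exercising the splitting
       * case above on the host-context heap (pVM = NULL).  The range values
       * and names are made up for the example. */
    PCPUMMSRRANGE paRanges = NULL;
    uint32_t      cRanges  = 0;
    CPUMMSRRANGE  Whole    = RVI(0x00000000, 0x000000ff, "EXAMPLE_WHOLE",  0);
    CPUMMSRRANGE  Middle   = RVI(0x00000020, 0x0000002f, "EXAMPLE_MIDDLE", 0);
    int rcIns = cpumR3MsrRangesInsert(NULL /*pVM*/, &paRanges, &cRanges, &Whole);
    if (RT_SUCCESS(rcIns))
        rcIns = cpumR3MsrRangesInsert(NULL /*pVM*/, &paRanges, &cRanges, &Middle);
    /* cRanges is now 3: {0x00..0x1f}, {0x20..0x2f} and {0x30..0xff}. */
    RTMemFree(paRanges);
#endif
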

/**
 * Worker for cpumR3MsrApplyFudge that applies one table.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   paRanges    Array of MSRs to fudge.
 * @param   cRanges     Number of MSRs in the array.
 */
static int cpumR3MsrApplyFudgeTable(PVM pVM, PCCPUMMSRRANGE paRanges, size_t cRanges)
{
    for (uint32_t i = 0; i < cRanges; i++)
        if (!cpumLookupMsrRange(pVM, paRanges[i].uFirst))
        {
            LogRel(("CPUM: MSR fudge: %#010x %s\n", paRanges[i].uFirst, paRanges[i].szName));
            int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
                                           &paRanges[i]);
            if (RT_FAILURE(rc))
                return rc;
        }
    return VINF_SUCCESS;
}


/**
 * Fudges the MSRs that guests are known to access in some odd cases.
 *
 * A typical example is a VM that has been moved between different hosts where
 * for instance the CPU vendor differs.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the cross context VM structure.
 */
int cpumR3MsrApplyFudge(PVM pVM)
{
    /*
     * Basic.
     */
    static CPUMMSRRANGE const s_aFudgeMsrs[] =
    {
        MFO(0x00000000, "IA32_P5_MC_ADDR",          Ia32P5McAddr),
        MFX(0x00000001, "IA32_P5_MC_TYPE",          Ia32P5McType,   Ia32P5McType,   0, 0, UINT64_MAX),
        MVO(0x00000017, "IA32_PLATFORM_ID",         0),
        MFN(0x0000001b, "IA32_APIC_BASE",           Ia32ApicBase,   Ia32ApicBase),
        MVI(0x0000008b, "BIOS_SIGN",                0),
        MFX(0x000000fe, "IA32_MTRRCAP",             Ia32MtrrCap,    ReadOnly,       0x508, 0, 0),
        MFX(0x00000179, "IA32_MCG_CAP",             Ia32McgCap,     ReadOnly,       0x005, 0, 0),
        MFX(0x0000017a, "IA32_MCG_STATUS",          Ia32McgStatus,  Ia32McgStatus,  0, ~(uint64_t)UINT32_MAX, 0),
        MFN(0x000001a0, "IA32_MISC_ENABLE",         Ia32MiscEnable, Ia32MiscEnable),
        MFN(0x000001d9, "IA32_DEBUGCTL",            Ia32DebugCtl,   Ia32DebugCtl),
        MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP",   P6LastBranchFromIp),
        MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP",     P6LastBranchToIp),
        MFO(0x000001dd, "P6_LAST_INT_FROM_IP",      P6LastIntFromIp),
        MFO(0x000001de, "P6_LAST_INT_TO_IP",        P6LastIntToIp),
        MFS(0x00000277, "IA32_PAT",                 Ia32Pat, Ia32Pat, Guest.msrPAT),
        MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE",       Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, ~(uint64_t)0xc07),
        MFN(0x00000400, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
    };
    int rc = cpumR3MsrApplyFudgeTable(pVM, &s_aFudgeMsrs[0], RT_ELEMENTS(s_aFudgeMsrs));
    AssertLogRelRCReturn(rc, rc);

    /*
     * XP might mistake opterons and other newer CPUs for P4s.
     */
    if (pVM->cpum.s.GuestFeatures.uFamily >= 0xf)
    {
        static CPUMMSRRANGE const s_aP4FudgeMsrs[] =
        {
            MFX(0x0000002c, "P4_EBC_FREQUENCY_ID", IntelP4EbcFrequencyId, IntelP4EbcFrequencyId, 0xf12010f, UINT64_MAX, 0),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aP4FudgeMsrs[0], RT_ELEMENTS(s_aP4FudgeMsrs));
        AssertLogRelRCReturn(rc, rc);
    }

    return rc;
}


/**
 * Looks up the CPU database entry for the given CPU name and fills in the
 * CPU info structure accordingly.
 *
 * @returns VBox status code.
 * @param   pszName     The CPU name, or "host" to base the info on the host CPU.
 * @param   pInfo       Where to store the CPU information.
 */
int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo)
{
    CPUMDBENTRY const *pEntry = NULL;
    int                rc;

    if (!strcmp(pszName, "host"))
    {
        /*
         * Create a CPU database entry for the host CPU.  This means getting
         * the CPUID bits from the real CPU and grabbing the closest matching
         * database entry for MSRs.
         */
        rc = CPUMR3CpuIdDetectUnknownLeafMethod(&pInfo->enmUnknownCpuIdMethod, &pInfo->DefCpuId);
        if (RT_FAILURE(rc))
            return rc;
        rc = CPUMR3CpuIdCollectLeaves(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
        if (RT_FAILURE(rc))
            return rc;

        /* Lookup database entry for MSRs. */
        CPUMCPUVENDOR const enmVendor    = CPUMR3CpuIdDetectVendorEx(pInfo->paCpuIdLeavesR3[0].uEax,
                                                                     pInfo->paCpuIdLeavesR3[0].uEbx,
                                                                     pInfo->paCpuIdLeavesR3[0].uEcx,
                                                                     pInfo->paCpuIdLeavesR3[0].uEdx);
        uint32_t      const uStd1Eax     = pInfo->paCpuIdLeavesR3[1].uEax;
        uint8_t       const uFamily      = ASMGetCpuFamily(uStd1Eax);
        uint8_t       const uModel       = ASMGetCpuModel(uStd1Eax, enmVendor == CPUMCPUVENDOR_INTEL);
        uint8_t       const uStepping    = ASMGetCpuStepping(uStd1Eax);
        CPUMMICROARCH const enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx(enmVendor, uFamily, uModel, uStepping);

        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
        {
            CPUMDBENTRY const *pCur = g_apCpumDbEntries[i];
            if ((CPUMCPUVENDOR)pCur->enmVendor == enmVendor)
            {
                /* Match against family, microarch, model and stepping.  Except
                   for the family, always match the closest value, with
                   preference given to newer (higher) values over older ones. */
                if (pCur->uFamily == uFamily)
                {
                    if (pCur->enmMicroarch == enmMicroarch)
                    {
                        if (pCur->uModel == uModel)
                        {
                            if (pCur->uStepping == uStepping)
                            {
                                /* Perfect match. */
                                pEntry = pCur;
                                break;
                            }

                            if (   !pEntry
                                || pEntry->uModel       != uModel
                                || pEntry->enmMicroarch != enmMicroarch
                                || pEntry->uFamily      != uFamily)
                                pEntry = pCur;
                            else if (  pCur->uStepping >= uStepping
                                     ? pCur->uStepping < pEntry->uStepping || pEntry->uStepping < uStepping
                                     : pCur->uStepping > pEntry->uStepping)
                                pEntry = pCur;
                        }
                        else if (   !pEntry
                                 || pEntry->enmMicroarch != enmMicroarch
                                 || pEntry->uFamily      != uFamily)
                            pEntry = pCur;
                        else if (  pCur->uModel >= uModel
                                 ? pCur->uModel < pEntry->uModel || pEntry->uModel < uModel
                                 : pCur->uModel > pEntry->uModel)
                            pEntry = pCur;
                    }
                    else if (   !pEntry
                             || pEntry->uFamily != uFamily)
                        pEntry = pCur;
                    else if (  pCur->enmMicroarch >= enmMicroarch
                             ? pCur->enmMicroarch < pEntry->enmMicroarch || pEntry->enmMicroarch < enmMicroarch
                             : pCur->enmMicroarch > pEntry->enmMicroarch)
                        pEntry = pCur;
                }
                /* We don't do closeness matching on family, we use the first
                   entry for the CPU vendor instead. (P4 workaround.) */
                else if (!pEntry)
                    pEntry = pCur;
            }
        }

        if (pEntry)
            LogRel(("CPUM: Matched host CPU %s %#x/%#x/%#x %s with CPU DB entry '%s' (%s %#x/%#x/%#x %s).\n",
                    CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
                    pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor), pEntry->uFamily, pEntry->uModel,
                    pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
        else
        {
            pEntry = g_apCpumDbEntries[0];
            LogRel(("CPUM: No matching processor database entry %s %#x/%#x/%#x %s, falling back on '%s'.\n",
                    CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
                    pEntry->pszName));
        }
    }
    else
    {
        /*
         * We're supposed to be emulating a specific CPU that is included in
         * our CPU database.  The CPUID tables need to be copied onto the
         * heap so the caller can modify them and so they can be freed as
         * in the host case above.
         */
        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
            if (!strcmp(pszName, g_apCpumDbEntries[i]->pszName))
            {
                pEntry = g_apCpumDbEntries[i];
                break;
            }
        if (!pEntry)
        {
            LogRel(("CPUM: Cannot locate any CPU by the name '%s'\n", pszName));
            return VERR_CPUM_DB_CPU_NOT_FOUND;
        }

        pInfo->cCpuIdLeaves = pEntry->cCpuIdLeaves;
        if (pEntry->cCpuIdLeaves)
        {
            pInfo->paCpuIdLeavesR3 = (PCPUMCPUIDLEAF)RTMemDup(pEntry->paCpuIdLeaves,
                                                              sizeof(pEntry->paCpuIdLeaves[0]) * pEntry->cCpuIdLeaves);
            if (!pInfo->paCpuIdLeavesR3)
                return VERR_NO_MEMORY;
        }
        else
            pInfo->paCpuIdLeavesR3 = NULL;

        pInfo->enmUnknownCpuIdMethod = pEntry->enmUnknownCpuId;
        pInfo->DefCpuId              = pEntry->DefUnknownCpuId;

        LogRel(("CPUM: Using CPU DB entry '%s' (%s %#x/%#x/%#x %s).\n",
                pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor),
                pEntry->uFamily, pEntry->uModel, pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
    }

    pInfo->fMsrMask             = pEntry->fMsrMask;
    pInfo->iFirstExtCpuIdLeaf   = 0; /* Set by caller. */
    pInfo->uPadding             = 0;
    pInfo->uScalableBusFreq     = pEntry->uScalableBusFreq;
    pInfo->paCpuIdLeavesR0      = NIL_RTR0PTR;
    pInfo->paMsrRangesR0        = NIL_RTR0PTR;
    pInfo->paCpuIdLeavesRC      = NIL_RTRCPTR;
    pInfo->paMsrRangesRC        = NIL_RTRCPTR;

    /*
     * Copy the MSR range.
     */
    uint32_t       cMsrs  = 0;
    PCPUMMSRRANGE  paMsrs = NULL;

    PCCPUMMSRRANGE pCurMsr = pEntry->paMsrRanges;
    uint32_t       cLeft   = pEntry->cMsrRanges;
    while (cLeft-- > 0)
    {
        rc = cpumR3MsrRangesInsert(NULL /* pVM */, &paMsrs, &cMsrs, pCurMsr);
        if (RT_FAILURE(rc))
        {
            Assert(!paMsrs); /* The above function frees this. */
            RTMemFree(pInfo->paCpuIdLeavesR3);
            pInfo->paCpuIdLeavesR3 = NULL;
            return rc;
        }
        pCurMsr++;
    }

    pInfo->paMsrRangesR3 = paMsrs;
    pInfo->cMsrRanges    = cMsrs;
    return VINF_SUCCESS;
}

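#if 0 /* Illustrative sketch, not part of the build: how a caller might use
       * cpumR3DbGetCpuInfo with the special "host" name.  A database entry
       * name matching a CPUMDBENTRY::pszName field works the same way. */
    CPUMINFO Info;
    RT_ZERO(Info);
    int rc2 = cpumR3DbGetCpuInfo("host", &Info);
    if (RT_SUCCESS(rc2))
    {
        /* Info.paCpuIdLeavesR3 and Info.paMsrRangesR3 are now heap copies
           which the caller may modify and must eventually free. */
    }
#endif
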

/**
 * Insert an MSR range into the VM.
 *
 * If the new MSR range overlaps existing ranges, the existing ones will be
 * adjusted/removed to fit in the new one.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   pNewRange   Pointer to the MSR range being inserted.
 */
VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange)
{
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pNewRange, VERR_INVALID_PARAMETER);

    return cpumR3MsrRangesInsert(pVM, NULL /* ppaMsrRanges */, NULL /* pcMsrRanges */, pNewRange);
}

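#if 0 /* Illustrative sketch, not part of the build: overriding a single MSR
       * on a VM with a read-only fixed value via the MVO macro above.  The
       * MSR number and value are reused from the fudge table merely as an
       * example. */
    CPUMMSRRANGE Override = MVO(0x00000017, "IA32_PLATFORM_ID", 0);
    int rc2 = CPUMR3MsrRangesInsert(pVM, &Override);
    AssertLogRelRC(rc2);
#endif
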

/**
 * Register statistics for the MSRs.
 *
 * This must not be called before the MSRs have been finalized and moved to the
 * hyper heap.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the cross context VM structure.
 */
int cpumR3MsrRegStats(PVM pVM)
{
    /*
     * Global statistics.
     */
    PCPUM pCpum = &pVM->cpum.s;
    STAM_REL_REG(pVM, &pCpum->cMsrReads,               STAMTYPE_COUNTER, "/CPUM/MSR-Totals/Reads",
                 STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
    STAM_REL_REG(pVM, &pCpum->cMsrReadsRaiseGp,        STAMTYPE_COUNTER, "/CPUM/MSR-Totals/ReadsRaisingGP",
                 STAMUNIT_OCCURENCES, "RDMSR raising #GPs, except unknown MSRs.");
    STAM_REL_REG(pVM, &pCpum->cMsrReadsUnknown,        STAMTYPE_COUNTER, "/CPUM/MSR-Totals/ReadsUnknown",
                 STAMUNIT_OCCURENCES, "RDMSR on unknown MSRs (raises #GP).");
    STAM_REL_REG(pVM, &pCpum->cMsrWrites,              STAMTYPE_COUNTER, "/CPUM/MSR-Totals/Writes",
                 STAMUNIT_OCCURENCES, "All WRMSRs making it to CPUM.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesRaiseGp,       STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesRaisingGP",
                 STAMUNIT_OCCURENCES, "WRMSR raising #GPs, except unknown MSRs.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesToIgnoredBits, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesToIgnoredBits",
                 STAMUNIT_OCCURENCES, "Writing of ignored bits.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesUnknown,       STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesUnknown",
                 STAMUNIT_OCCURENCES, "WRMSR on unknown MSRs (raises #GP).");


# ifdef VBOX_WITH_STATISTICS
    /*
     * Per range.
     */
    PCPUMMSRRANGE paRanges = pVM->cpum.s.GuestInfo.paMsrRangesR3;
    uint32_t      cRanges  = pVM->cpum.s.GuestInfo.cMsrRanges;
    for (uint32_t i = 0; i < cRanges; i++)
    {
        char    szName[160];
        ssize_t cchName;

        if (paRanges[i].uFirst == paRanges[i].uLast)
            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%s",
                                  paRanges[i].uFirst, paRanges[i].szName);
        else
            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%#010x-%s",
                                  paRanges[i].uFirst, paRanges[i].uLast, paRanges[i].szName);

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-reads");
        STAMR3Register(pVM, &paRanges[i].cReads, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RDMSR");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-writes");
        STAMR3Register(pVM, &paRanges[i].cWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-GPs");
        STAMR3Register(pVM, &paRanges[i].cGps, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "#GPs");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-ign-bits-writes");
        STAMR3Register(pVM, &paRanges[i].cIgnoredBits, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR w/ ignored bits");
    }
# endif /* VBOX_WITH_STATISTICS */

    return VINF_SUCCESS;
}

#endif /* !CPUM_DB_STANDALONE */
844