VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp@ 108400

Last change on this file since 108400 was 107854, checked in by vboxsync, 4 months ago

x86.h,VMM: More AMD CPUID bits; addressed some old todos related to these; fixed bugs in svm & vmx world switcher (sanity checks, ++). jiraref:VBP-947 bugref:10738

/* $Id: CPUMR3Db.cpp 107854 2025-01-18 23:59:26Z vboxsync $ */
/** @file
 * CPUM - CPU database part.
 */

/*
 * Copyright (C) 2013-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/mm.h>

#include <VBox/err.h>
#if !defined(RT_ARCH_ARM64)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/mem.h>
#include <iprt/string.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** @def NULL_ALONE
 * For eliminating an unnecessary data dependency in standalone builds (for
 * VBoxSVC). */
/** @def ZERO_ALONE
 * For eliminating an unnecessary data size dependency in standalone builds (for
 * VBoxSVC). */
#ifndef CPUM_DB_STANDALONE
# define NULL_ALONE(a_aTable)   a_aTable
# define ZERO_ALONE(a_cTable)   a_cTable
#else
# define NULL_ALONE(a_aTable)   NULL
# define ZERO_ALONE(a_cTable)   0
#endif


/** @name Short macros for the MSR range entries.
 *
 * These are rather cryptic, but this is to reduce the attack on the right
 * margin.
 *
 * @{ */
/** Alias one MSR onto another (a_uTarget). */
#define MAL(a_uMsr, a_szName, a_uTarget) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_MsrAlias, kCpumMsrWrFn_MsrAlias, 0, a_uTarget, 0, 0, a_szName)
/** Function handles everything. */
#define MFN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Function handles everything, with GP mask. */
#define MFG(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, a_fWrGpMask, a_szName)
/** Function handlers, read-only. */
#define MFO(a_uMsr, a_szName, a_enmRdFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_ReadOnly, 0, 0, 0, UINT64_MAX, a_szName)
/** Function handlers, ignore all writes. */
#define MFI(a_uMsr, a_szName, a_enmRdFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_IgnoreWrite, 0, 0, UINT64_MAX, 0, a_szName)
/** Function handlers, with value. */
#define MFV(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, 0, 0, a_szName)
/** Function handlers, with write ignore mask. */
#define MFW(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrIgnMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, a_fWrIgnMask, 0, a_szName)
/** Function handlers, extended version. */
#define MFX(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Function handlers, with CPUMCPU storage variable. */
#define MFS(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, 0, 0, a_szName)
/** Function handlers, with CPUMCPU storage variable, ignore mask and GP mask. */
#define MFZ(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Read-only fixed value. */
#define MVO(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Read-only fixed value, ignores all writes. */
#define MVI(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Read fixed value, ignore writes outside GP mask. */
#define MVG(a_uMsr, a_szName, a_uValue, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, 0, a_fWrGpMask, a_szName)
/** Read fixed value, extended version with both GP and ignore masks. */
#define MVX(a_uMsr, a_szName, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** The short form, no CPUM backing. */
#define MSN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)

/** Range: Function handles everything. */
#define RFN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Range: Read fixed value, read-only. */
#define RVO(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Range: Read fixed value, ignore writes. */
#define RVI(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Range: The short form, no CPUM backing. */
#define RSN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)

/** Internal form used by the macros. */
#ifdef VBOX_WITH_STATISTICS
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName, \
      { 0 }, { 0 }, { 0 }, { 0 } }
#else
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName }
#endif
/** @} */
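
/* Illustrative note (added; not part of the original sources): how one of the
   short macros above expands.  Assuming a build without VBOX_WITH_STATISTICS,
   the table entry
       MVI(0x0000008b, "BIOS_SIGN", 0)
   goes through RINT and yields the initializer
       { 0x0000008b, 0x0000008b, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite,
         0 /*offCpumCpu*/, 0 /*fReserved*/, 0 /*uValue*/,
         UINT64_MAX /*fWrIgnMask*/, 0 /*fWrGpMask*/, "BIOS_SIGN" }
   i.e. reads of MSR 0x8b return the fixed value 0 and all write bits are
   silently ignored. */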

#ifndef CPUM_DB_STANDALONE

#include "cpus/Intel_Core_i7_6700K.h"
#include "cpus/Intel_Core_i7_5600U.h"
#include "cpus/Intel_Core_i7_3960X.h"
#include "cpus/Intel_Core_i5_3570.h"
#include "cpus/Intel_Core_i7_2635QM.h"
#include "cpus/Intel_Xeon_X5482_3_20GHz.h"
#include "cpus/Intel_Core2_X6800_2_93GHz.h"
#include "cpus/Intel_Core2_T7600_2_33GHz.h"
#include "cpus/Intel_Core_Duo_T2600_2_16GHz.h"
#include "cpus/Intel_Pentium_M_processor_2_00GHz.h"
#include "cpus/Intel_Pentium_4_3_00GHz.h"
#include "cpus/Intel_Pentium_N3530_2_16GHz.h"
#include "cpus/Intel_Atom_330_1_60GHz.h"
#include "cpus/Intel_80486.h"
#include "cpus/Intel_80386.h"
#include "cpus/Intel_80286.h"
#include "cpus/Intel_80186.h"
#include "cpus/Intel_8086.h"

#include "cpus/AMD_Ryzen_7_1800X_Eight_Core.h"
#include "cpus/AMD_FX_8150_Eight_Core.h"
#include "cpus/AMD_Phenom_II_X6_1100T.h"
#include "cpus/Quad_Core_AMD_Opteron_2384.h"
#include "cpus/AMD_Athlon_64_X2_Dual_Core_4200.h"
#include "cpus/AMD_Athlon_64_3200.h"

#include "cpus/VIA_QuadCore_L4700_1_2_GHz.h"

#include "cpus/ZHAOXIN_KaiXian_KX_U5581_1_8GHz.h"

#include "cpus/Hygon_C86_7185_32_core.h"


/**
 * The database entries.
 *
 * 1. The first entry is special.  It is the fallback for unknown
 *    processors.  Thus, it better be pretty representative.
 *
 * 2. The first entry for a CPU vendor is likewise important as it is
 *    the default entry for that vendor.
 *
 * Generally we put the most recent CPUs first, since these tend to have the
 * most complicated and backwards compatible list of MSRs.
 */
static CPUMDBENTRY const * const g_apCpumDbEntries[] =
{
#ifdef VBOX_CPUDB_Intel_Core_i7_6700K_h
    &g_Entry_Intel_Core_i7_6700K,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i7_5600U_h
    &g_Entry_Intel_Core_i7_5600U,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i5_3570_h
    &g_Entry_Intel_Core_i5_3570,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i7_3960X_h
    &g_Entry_Intel_Core_i7_3960X,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i7_2635QM_h
    &g_Entry_Intel_Core_i7_2635QM,
#endif
#ifdef VBOX_CPUDB_Intel_Pentium_N3530_2_16GHz_h
    &g_Entry_Intel_Pentium_N3530_2_16GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Atom_330_1_60GHz_h
    &g_Entry_Intel_Atom_330_1_60GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz_h
    &g_Entry_Intel_Pentium_M_processor_2_00GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz_h
    &g_Entry_Intel_Xeon_X5482_3_20GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Core2_X6800_2_93GHz_h
    &g_Entry_Intel_Core2_X6800_2_93GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Core2_T7600_2_33GHz_h
    &g_Entry_Intel_Core2_T7600_2_33GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Core_Duo_T2600_2_16GHz_h
    &g_Entry_Intel_Core_Duo_T2600_2_16GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Pentium_4_3_00GHz_h
    &g_Entry_Intel_Pentium_4_3_00GHz,
#endif
/** @todo pentium, pentium mmx, pentium pro, pentium II, pentium III */
#ifdef VBOX_CPUDB_Intel_80486_h
    &g_Entry_Intel_80486,
#endif
#ifdef VBOX_CPUDB_Intel_80386_h
    &g_Entry_Intel_80386,
#endif
#ifdef VBOX_CPUDB_Intel_80286_h
    &g_Entry_Intel_80286,
#endif
#ifdef VBOX_CPUDB_Intel_80186_h
    &g_Entry_Intel_80186,
#endif
#ifdef VBOX_CPUDB_Intel_8086_h
    &g_Entry_Intel_8086,
#endif

#ifdef VBOX_CPUDB_AMD_Ryzen_7_1800X_Eight_Core_h
    &g_Entry_AMD_Ryzen_7_1800X_Eight_Core,
#endif
#ifdef VBOX_CPUDB_AMD_FX_8150_Eight_Core_h
    &g_Entry_AMD_FX_8150_Eight_Core,
#endif
#ifdef VBOX_CPUDB_AMD_Phenom_II_X6_1100T_h
    &g_Entry_AMD_Phenom_II_X6_1100T,
#endif
#ifdef VBOX_CPUDB_Quad_Core_AMD_Opteron_2384_h
    &g_Entry_Quad_Core_AMD_Opteron_2384,
#endif
#ifdef VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200_h
    &g_Entry_AMD_Athlon_64_X2_Dual_Core_4200,
#endif
#ifdef VBOX_CPUDB_AMD_Athlon_64_3200_h
    &g_Entry_AMD_Athlon_64_3200,
#endif

#ifdef VBOX_CPUDB_ZHAOXIN_KaiXian_KX_U5581_1_8GHz_h
    &g_Entry_ZHAOXIN_KaiXian_KX_U5581_1_8GHz,
#endif

#ifdef VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz_h
    &g_Entry_VIA_QuadCore_L4700_1_2_GHz,
#endif

#ifdef VBOX_CPUDB_NEC_V20_h
    &g_Entry_NEC_V20,
#endif

#ifdef VBOX_CPUDB_Hygon_C86_7185_32_core_h
    &g_Entry_Hygon_C86_7185_32_core,
#endif
};


/**
 * Returns the number of entries in the CPU database.
 *
 * @returns Number of entries.
 * @sa      PFNCPUMDBGETENTRIES
 */
VMMR3DECL(uint32_t) CPUMR3DbGetEntries(void)
{
    return RT_ELEMENTS(g_apCpumDbEntries);
}


/**
 * Returns CPU database entry for the given index.
 *
 * @returns Pointer to the CPU database entry, NULL if index is out of bounds.
 * @param   idxCpuDb    The index (0..CPUMR3DbGetEntries).
 * @sa      PFNCPUMDBGETENTRYBYINDEX
 */
VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByIndex(uint32_t idxCpuDb)
{
    AssertReturn(idxCpuDb < RT_ELEMENTS(g_apCpumDbEntries), NULL);
    return g_apCpumDbEntries[idxCpuDb];
}


/**
 * Returns CPU database entry with the given name.
 *
 * @returns Pointer to the CPU database entry, NULL if not found.
 * @param   pszName     The name of the profile to return.
 * @sa      PFNCPUMDBGETENTRYBYNAME
 */
VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByName(const char *pszName)
{
    AssertPtrReturn(pszName, NULL);
    AssertReturn(*pszName, NULL);
    for (size_t i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
        if (strcmp(g_apCpumDbEntries[i]->pszName, pszName) == 0)
            return g_apCpumDbEntries[i];
    return NULL;
}
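
/* Illustrative usage sketch (added; not from the original sources): how a
   ring-3 caller could enumerate the database and look an entry up by name.
   RTPrintf (from iprt/stream.h) and the profile name are used here purely
   as an example; the name must match an entry's pszName exactly. */
#if 0
static void cpumR3DbExampleListEntries(void)
{
    uint32_t const cEntries = CPUMR3DbGetEntries();
    for (uint32_t i = 0; i < cEntries; i++)
        RTPrintf("#%u: %s\n", i, CPUMR3DbGetEntryByIndex(i)->pszName);

    PCCPUMDBENTRY pEntry = CPUMR3DbGetEntryByName("Quad-Core AMD Opteron 2384"); /* hypothetical name */
    if (!pEntry)
        RTPrintf("no such profile\n");
}
#endif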


/**
 * Binary search used by cpumR3MsrRangesInsert; it has some special properties
 * wrt mismatches.
 *
 * @returns Insert location.
 * @param   paMsrRanges     The MSR ranges to search.
 * @param   cMsrRanges      The number of MSR ranges.
 * @param   uMsr            What to search for.
 */
static uint32_t cpumR3MsrRangesBinSearch(PCCPUMMSRRANGE paMsrRanges, uint32_t cMsrRanges, uint32_t uMsr)
{
    if (!cMsrRanges)
        return 0;

    uint32_t iStart = 0;
    uint32_t iLast  = cMsrRanges - 1;
    for (;;)
    {
        uint32_t i = iStart + (iLast - iStart + 1) / 2;
        if (   uMsr >= paMsrRanges[i].uFirst
            && uMsr <= paMsrRanges[i].uLast)
            return i;
        if (uMsr < paMsrRanges[i].uFirst)
        {
            if (i <= iStart)
                return i;
            iLast = i - 1;
        }
        else
        {
            if (i >= iLast)
            {
                if (i < cMsrRanges)
                    i++;
                return i;
            }
            iStart = i + 1;
        }
    }
}
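
/* Worked example (added for illustration): given the sorted ranges
   { [0x10..0x1f], [0x40..0x4f] }, cpumR3MsrRangesBinSearch returns:
       uMsr = 0x45 -> 1  (hit inside the second range)
       uMsr = 0x30 -> 1  (miss; index where a new range would be inserted)
       uMsr = 0x08 -> 0  (miss below the first range)
   So on a mismatch the return value is still a valid insert location for
   cpumR3MsrRangesInsert rather than a not-found indicator. */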


/**
 * Ensures that there is space for at least @a cNewRanges in the table,
 * reallocating the table if necessary.
 *
 * @returns Pointer to the MSR ranges on success, NULL on failure.  On failure
 *          @a *ppaMsrRanges is freed and set to NULL.
 * @param   pVM             The cross context VM structure.  If NULL,
 *                          use the process heap, otherwise the VM's hyper heap.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 * @param   cMsrRanges      The current number of ranges.
 * @param   cNewRanges      The number of ranges to be added.
 */
static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
{
    if (  cMsrRanges + cNewRanges
        > RT_ELEMENTS(pVM->cpum.s.GuestInfo.aMsrRanges) + (pVM ? 0 : 128 /* Catch too many MSRs in CPU reporter! */))
    {
        LogRel(("CPUM: Too many MSR ranges! %#x, max %#x\n",
                cMsrRanges + cNewRanges, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aMsrRanges)));
        return NULL;
    }
    if (pVM)
    {
        Assert(cMsrRanges   == pVM->cpum.s.GuestInfo.cMsrRanges);
        Assert(*ppaMsrRanges == pVM->cpum.s.GuestInfo.aMsrRanges);
    }
    else
    {
        if (cMsrRanges + cNewRanges > RT_ALIGN_32(cMsrRanges, 16))
        {
            uint32_t const cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
            void *pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
            if (pvNew)
                *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
            else
            {
                RTMemFree(*ppaMsrRanges);
                *ppaMsrRanges = NULL;
                return NULL;
            }
        }
    }

    return *ppaMsrRanges;
}


/**
 * Inserts a new MSR range into a sorted MSR range array.
 *
 * If the new MSR range overlaps existing ranges, the existing ones will be
 * adjusted/removed to fit in the new one.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS
 * @retval  VERR_NO_MEMORY
 *
 * @param   pVM             The cross context VM structure.  If NULL,
 *                          use the process heap, otherwise the VM's hyper heap.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 *                          Must be NULL if using the hyper heap.
 * @param   pcMsrRanges     The variable holding number of ranges. Must be NULL
 *                          if using the hyper heap.
 * @param   pNewRange       The new range.
 */
int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange)
{
    Assert(pNewRange->uLast >= pNewRange->uFirst);
    Assert(pNewRange->enmRdFn > kCpumMsrRdFn_Invalid && pNewRange->enmRdFn < kCpumMsrRdFn_End);
    Assert(pNewRange->enmWrFn > kCpumMsrWrFn_Invalid && pNewRange->enmWrFn < kCpumMsrWrFn_End);

    /*
     * Validate and use the VM's MSR ranges array if we are using the hyper heap.
     */
    if (pVM)
    {
        AssertReturn(!ppaMsrRanges, VERR_INVALID_PARAMETER);
        AssertReturn(!pcMsrRanges, VERR_INVALID_PARAMETER);
        AssertReturn(pVM->cpum.s.GuestInfo.paMsrRangesR3 == pVM->cpum.s.GuestInfo.aMsrRanges, VERR_INTERNAL_ERROR_3);

        ppaMsrRanges = &pVM->cpum.s.GuestInfo.paMsrRangesR3;
        pcMsrRanges  = &pVM->cpum.s.GuestInfo.cMsrRanges;
    }
    else
    {
        AssertReturn(ppaMsrRanges, VERR_INVALID_POINTER);
        AssertReturn(pcMsrRanges, VERR_INVALID_POINTER);
    }

    uint32_t        cMsrRanges  = *pcMsrRanges;
    PCPUMMSRRANGE   paMsrRanges = *ppaMsrRanges;

    /*
     * Optimize the linear insertion case where we add new entries at the end.
     */
    if (   cMsrRanges > 0
        && paMsrRanges[cMsrRanges - 1].uLast < pNewRange->uFirst)
    {
        paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
        if (!paMsrRanges)
            return VERR_NO_MEMORY;
        paMsrRanges[cMsrRanges] = *pNewRange;
        *pcMsrRanges += 1;
    }
    else
    {
        uint32_t i = cpumR3MsrRangesBinSearch(paMsrRanges, cMsrRanges, pNewRange->uFirst);
        Assert(i == cMsrRanges || pNewRange->uFirst <= paMsrRanges[i].uLast);
        Assert(i == 0 || pNewRange->uFirst > paMsrRanges[i - 1].uLast);

        /*
         * Adding an entirely new entry?
         */
        if (   i >= cMsrRanges
            || pNewRange->uLast < paMsrRanges[i].uFirst)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
        /*
         * Replace existing entry?
         */
        else if (   pNewRange->uFirst == paMsrRanges[i].uFirst
                 && pNewRange->uLast  == paMsrRanges[i].uLast)
            paMsrRanges[i] = *pNewRange;
        /*
         * Splitting an existing entry?
         */
        else if (   pNewRange->uFirst > paMsrRanges[i].uFirst
                 && pNewRange->uLast  < paMsrRanges[i].uLast)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 2);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            Assert(i < cMsrRanges);
            memmove(&paMsrRanges[i + 2], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i + 1] = *pNewRange;
            paMsrRanges[i + 2] = paMsrRanges[i];
            paMsrRanges[i    ].uLast  = pNewRange->uFirst - 1;
            paMsrRanges[i + 2].uFirst = pNewRange->uLast  + 1;
            *pcMsrRanges += 2;
        }
        /*
         * Complicated scenarios that can affect more than one range.
         *
         * The current code does not optimize memmove calls when replacing
         * one or more existing ranges, because it's tedious to deal with and
         * not expected to be a frequent usage scenario.
         */
        else
        {
            /* Adjust start of first match? */
            if (   pNewRange->uFirst <= paMsrRanges[i].uFirst
                && pNewRange->uLast  <  paMsrRanges[i].uLast)
                paMsrRanges[i].uFirst = pNewRange->uLast + 1;
            else
            {
                /* Adjust end of first match? */
                if (pNewRange->uFirst > paMsrRanges[i].uFirst)
                {
                    Assert(paMsrRanges[i].uLast >= pNewRange->uFirst);
                    paMsrRanges[i].uLast = pNewRange->uFirst - 1;
                    i++;
                }
                /* Replace the whole first match (lazy bird). */
                else
                {
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }

                /* Does the new range affect more ranges? */
                while (   i < cMsrRanges
                       && pNewRange->uLast >= paMsrRanges[i].uFirst)
                {
                    if (pNewRange->uLast < paMsrRanges[i].uLast)
                    {
                        /* Adjust the start of it, then we're done. */
                        paMsrRanges[i].uFirst = pNewRange->uLast + 1;
                        break;
                    }

                    /* Remove it entirely. */
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }
            }

            /* Now, perform a normal insertion. */
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
    }

    return VINF_SUCCESS;
}
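
/* Illustrative sketch (added; not from the original sources): building a
   standalone, process-heap based range table the same way cpumR3DbGetCpuInfo
   does below.  The initializer follows the non-statistics RINT layout; the
   MSR number, value and name are made up for the example. */
#if 0
static int cpumR3DbExampleBuildTable(void)
{
    PCPUMMSRRANGE paRanges = NULL;
    uint32_t      cRanges  = 0;

    CPUMMSRRANGE const ExampleRange =
    {
        /*.uFirst =*/       0x00001234,
        /*.uLast =*/        0x00001234,
        /*.enmRdFn =*/      kCpumMsrRdFn_FixedValue,
        /*.enmWrFn =*/      kCpumMsrWrFn_IgnoreWrite,
        /*.offCpumCpu =*/   UINT16_MAX,
        /*.fReserved =*/    0,
        /*.uValue =*/       0,
        /*.fWrIgnMask =*/   UINT64_MAX,
        /*.fWrGpMask =*/    0,
        /*.szName = */      "EXAMPLE_MSR"
    };

    /* Passing pVM=NULL selects the process-heap variant; on allocation
       failure the array is freed and NULLed by cpumR3MsrRangesEnsureSpace. */
    int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &paRanges, &cRanges, &ExampleRange);
    if (RT_SUCCESS(rc))
        RTMemFree(paRanges);
    return rc;
}
#endif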


/**
 * Reconciles CPUID info with MSRs (selected ones).
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   fForceFlushCmd  Make sure MSR_IA32_FLUSH_CMD is present.
 * @param   fForceSpecCtrl  Make sure MSR_IA32_SPEC_CTRL is present.
 */
DECLHIDDEN(int) cpumR3MsrReconcileWithCpuId(PVM pVM, bool fForceFlushCmd, bool fForceSpecCtrl)
{
    PCCPUMMSRRANGE apToAdd[10];
    uint32_t       cToAdd = 0;

    /*
     * The IA32_FLUSH_CMD MSR was introduced in MCUs for CVE-2018-3646 and associates.
     */
    if (   pVM->cpum.s.GuestFeatures.fFlushCmd
        || fForceFlushCmd)
    {
        static CPUMMSRRANGE const s_FlushCmd =
        {
            /*.uFirst =*/       MSR_IA32_FLUSH_CMD,
            /*.uLast =*/        MSR_IA32_FLUSH_CMD,
            /*.enmRdFn =*/      kCpumMsrRdFn_WriteOnly,
            /*.enmWrFn =*/      kCpumMsrWrFn_Ia32FlushCmd,
            /*.offCpumCpu =*/   UINT16_MAX,
            /*.fReserved =*/    0,
            /*.uValue =*/       0,
            /*.fWrIgnMask =*/   0,
            /*.fWrGpMask =*/    ~MSR_IA32_FLUSH_CMD_F_L1D,
            /*.szName = */      "IA32_FLUSH_CMD"
        };
        apToAdd[cToAdd++] = &s_FlushCmd;
    }

    /*
     * The IA32_PRED_CMD MSR was introduced in MCUs for CVE-2018-3646 and associates.
     */
    if (   pVM->cpum.s.GuestFeatures.fIbpb
        /** @todo || pVM->cpum.s.GuestFeatures.fSbpb*/)
    {
        static CPUMMSRRANGE const s_PredCmd =
        {
            /*.uFirst =*/       MSR_IA32_PRED_CMD,
            /*.uLast =*/        MSR_IA32_PRED_CMD,
            /*.enmRdFn =*/      kCpumMsrRdFn_WriteOnly,
            /*.enmWrFn =*/      kCpumMsrWrFn_Ia32PredCmd,
            /*.offCpumCpu =*/   UINT16_MAX,
            /*.fReserved =*/    0,
            /*.uValue =*/       0,
            /*.fWrIgnMask =*/   0,
            /*.fWrGpMask =*/    ~MSR_IA32_PRED_CMD_F_IBPB,
            /*.szName = */      "IA32_PRED_CMD"
        };
        apToAdd[cToAdd++] = &s_PredCmd;
    }

    /*
     * The IA32_SPEC_CTRL MSR was introduced in MCUs for CVE-2018-3646 and associates.
     */
    if (   pVM->cpum.s.GuestFeatures.fSpecCtrlMsr
        || fForceSpecCtrl)
    {
        static CPUMMSRRANGE const s_SpecCtrl =
        {
            /*.uFirst =*/       MSR_IA32_SPEC_CTRL,
            /*.uLast =*/        MSR_IA32_SPEC_CTRL,
            /*.enmRdFn =*/      kCpumMsrRdFn_Ia32SpecCtrl,
            /*.enmWrFn =*/      kCpumMsrWrFn_Ia32SpecCtrl,
            /*.offCpumCpu =*/   UINT16_MAX,
            /*.fReserved =*/    0,
            /*.uValue =*/       0,
            /*.fWrIgnMask =*/   0,
            /*.fWrGpMask =*/    0,
            /*.szName = */      "IA32_SPEC_CTRL"
        };
        apToAdd[cToAdd++] = &s_SpecCtrl;
    }

    /*
     * The MSR_IA32_ARCH_CAPABILITIES was introduced in various spectre MCUs, or at least
     * documented in relation to such.
     */
    if (pVM->cpum.s.GuestFeatures.fArchCap)
    {
        static CPUMMSRRANGE const s_ArchCaps =
        {
            /*.uFirst =*/       MSR_IA32_ARCH_CAPABILITIES,
            /*.uLast =*/        MSR_IA32_ARCH_CAPABILITIES,
            /*.enmRdFn =*/      kCpumMsrRdFn_Ia32ArchCapabilities,
            /*.enmWrFn =*/      kCpumMsrWrFn_ReadOnly,
            /*.offCpumCpu =*/   UINT16_MAX,
            /*.fReserved =*/    0,
            /*.uValue =*/       0,
            /*.fWrIgnMask =*/   0,
            /*.fWrGpMask =*/    UINT64_MAX,
            /*.szName = */      "IA32_ARCH_CAPABILITIES"
        };
        apToAdd[cToAdd++] = &s_ArchCaps;
    }

    /*
     * Do the adding.
     */
    Assert(cToAdd <= RT_ELEMENTS(apToAdd));
    for (uint32_t i = 0; i < cToAdd; i++)
    {
        PCCPUMMSRRANGE pRange = apToAdd[i];
        Assert(pRange->uFirst == pRange->uLast);
        if (!cpumLookupMsrRange(pVM, pRange->uFirst))
        {
            LogRel(("CPUM: MSR/CPUID reconciliation insert: %#010x %s\n", pRange->uFirst, pRange->szName));
            int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3,
                                           &pVM->cpum.s.GuestInfo.cMsrRanges, pRange);
            AssertRCReturn(rc, rc);
        }
    }
    return VINF_SUCCESS;
}


/**
 * Worker for cpumR3MsrApplyFudge that applies one table.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   paRanges    Array of MSRs to fudge.
 * @param   cRanges     Number of MSRs in the array.
 */
static int cpumR3MsrApplyFudgeTable(PVM pVM, PCCPUMMSRRANGE paRanges, size_t cRanges)
{
    for (uint32_t i = 0; i < cRanges; i++)
        if (!cpumLookupMsrRange(pVM, paRanges[i].uFirst))
        {
            LogRel(("CPUM: MSR fudge: %#010x %s\n", paRanges[i].uFirst, paRanges[i].szName));
            int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
                                           &paRanges[i]);
            if (RT_FAILURE(rc))
                return rc;
        }
    return VINF_SUCCESS;
}


/**
 * Fudges the MSRs that guests are known to access in some odd cases.
 *
 * A typical example is a VM that has been moved between different hosts where
 * for instance the cpu vendor differs.
 *
 * Another example is older CPU profiles (e.g. Atom Bonnell) for newer CPUs (e.g.
 * Atom Silvermont), where features reported thru CPUID aren't present in the
 * MSRs (e.g. AMD64_TSC_AUX).
 *
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int cpumR3MsrApplyFudge(PVM pVM)
{
    /*
     * Basic.
     */
    static CPUMMSRRANGE const s_aFudgeMsrs[] =
    {
        MFO(0x00000000, "IA32_P5_MC_ADDR",          Ia32P5McAddr),
        MFX(0x00000001, "IA32_P5_MC_TYPE",          Ia32P5McType,   Ia32P5McType,   0, 0, UINT64_MAX),
        MVO(0x00000017, "IA32_PLATFORM_ID",         0),
        MFN(0x0000001b, "IA32_APIC_BASE",           Ia32ApicBase,   Ia32ApicBase),
        MVI(0x0000008b, "BIOS_SIGN",                0),
        MFX(0x000000fe, "IA32_MTRRCAP",             Ia32MtrrCap,    ReadOnly,       0x508, 0, 0),
        MFX(0x00000179, "IA32_MCG_CAP",             Ia32McgCap,     ReadOnly,       0x005, 0, 0),
        MFX(0x0000017a, "IA32_MCG_STATUS",          Ia32McgStatus,  Ia32McgStatus,  0, ~(uint64_t)UINT32_MAX, 0),
        MFN(0x000001a0, "IA32_MISC_ENABLE",         Ia32MiscEnable, Ia32MiscEnable),
        MFN(0x000001d9, "IA32_DEBUGCTL",            Ia32DebugCtl,   Ia32DebugCtl),
        MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP",   P6LastBranchFromIp),
        MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP",     P6LastBranchToIp),
        MFO(0x000001dd, "P6_LAST_INT_FROM_IP",      P6LastIntFromIp),
        MFO(0x000001de, "P6_LAST_INT_TO_IP",        P6LastIntToIp),
        MFS(0x00000277, "IA32_PAT",                 Ia32Pat, Ia32Pat, Guest.msrPAT),
        MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE",       Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, ~(uint64_t)0xc07),
        MFN(0x00000400, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
    };
    int rc = cpumR3MsrApplyFudgeTable(pVM, &s_aFudgeMsrs[0], RT_ELEMENTS(s_aFudgeMsrs));
    AssertLogRelRCReturn(rc, rc);

    /*
     * XP might mistake opterons and other newer CPUs for P4s.
     */
    if (pVM->cpum.s.GuestFeatures.uFamily >= 0xf)
    {
        static CPUMMSRRANGE const s_aP4FudgeMsrs[] =
        {
            MFX(0x0000002c, "P4_EBC_FREQUENCY_ID", IntelP4EbcFrequencyId, IntelP4EbcFrequencyId, 0xf12010f, UINT64_MAX, 0),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aP4FudgeMsrs[0], RT_ELEMENTS(s_aP4FudgeMsrs));
        AssertLogRelRCReturn(rc, rc);
    }

    if (pVM->cpum.s.GuestFeatures.fRdTscP)
    {
        static CPUMMSRRANGE const s_aRdTscPFudgeMsrs[] =
        {
            MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aRdTscPFudgeMsrs[0], RT_ELEMENTS(s_aRdTscPFudgeMsrs));
        AssertLogRelRCReturn(rc, rc);
    }

    /*
     * Windows 10 incorrectly writes to MSR_IA32_TSX_CTRL without checking
     * CPUID.ARCH_CAP(EAX=7h,ECX=0):EDX[bit 29] or the MSR feature bits in
     * MSR_IA32_ARCH_CAPABILITIES[bit 7], see @bugref{9630}.
     * Ignore writes to this MSR and return 0 on reads.
     *
     * Windows 11 24H2 incorrectly reads MSR_IA32_MCU_OPT_CTRL without
     * checking CPUID.ARCH_CAP(EAX=7h,ECX=0).EDX[bit 9] or the MSR feature
     * bits in MSR_IA32_ARCH_CAPABILITIES[bit 18], see @bugref{10794}.
     * Ignore writes to this MSR and return 0 on reads.
     */
    if (pVM->cpum.s.GuestFeatures.fArchCap)
    {
        static CPUMMSRRANGE const s_aTsxCtrl[] =
        {
            MVI(MSR_IA32_TSX_CTRL,     "IA32_TSX_CTRL",     0),
            MVI(MSR_IA32_MCU_OPT_CTRL, "IA32_MCU_OPT_CTRL", 0),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aTsxCtrl[0], RT_ELEMENTS(s_aTsxCtrl));
        AssertLogRelRCReturn(rc, rc);
    }

    return rc;
}

#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)

/**
 * Do we consider @a enmConsider a better match for @a enmTarget than
 * @a enmFound?
 *
 * Only called when @a enmConsider isn't exactly what we're looking for.
 *
 * @returns true/false.
 * @param   enmConsider     The new microarch to consider.
 * @param   enmTarget       The target microarch.
 * @param   enmFound        The best microarch match we've found thus far.
 */
DECLINLINE(bool) cpumR3DbIsBetterMarchMatch(CPUMMICROARCH enmConsider, CPUMMICROARCH enmTarget, CPUMMICROARCH enmFound)
{
    Assert(enmConsider != enmTarget);

    /*
     * If we've got a march match, don't bother with enmConsider.
     */
    if (enmFound == enmTarget)
        return false;

    /*
     * Found is below: Pick 'consider' if it's closer to the target or above it.
     */
    if (enmFound < enmTarget)
        return enmConsider > enmFound;

    /*
     * Found is above: Pick 'consider' if it's also above (paranoia: or equal)
     * but closer to the target.
     */
    return enmConsider >= enmTarget && enmConsider < enmFound;
}
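
/* Worked example (added for illustration, assuming the chronological ordering
   of the CPUMMICROARCH values): with enmTarget = SandyBridge and
   enmFound = Nehalem (below the target), considering IvyBridge (above the
   target) returns true, since anything above an undershooting match is
   preferred.  With enmFound = IvyBridge (already above the target),
   considering Haswell returns false, as it overshoots by more. */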


/**
 * Do we consider @a enmConsider a better match for @a enmTarget than
 * @a enmFound?
 *
 * Only called for intel family 06h CPUs.
 *
 * @returns true/false.
 * @param   enmConsider     The new microarch to consider.
 * @param   enmTarget       The target microarch.
 * @param   enmFound        The best microarch match we've found thus far.
 */
static bool cpumR3DbIsBetterIntelFam06Match(CPUMMICROARCH enmConsider, CPUMMICROARCH enmTarget, CPUMMICROARCH enmFound)
{
    /* Check intel family 06h claims. */
    AssertReturn(enmConsider >= kCpumMicroarch_Intel_P6_Core_Atom_First && enmConsider <= kCpumMicroarch_Intel_P6_Core_Atom_End,
                 false);
    AssertReturn(   (enmTarget >= kCpumMicroarch_Intel_P6_Core_Atom_First && enmTarget <= kCpumMicroarch_Intel_P6_Core_Atom_End)
                 || enmTarget == kCpumMicroarch_Intel_Unknown,
                 false);

    /* Put matches out of the way. */
    if (enmConsider == enmTarget)
        return true;
    if (enmFound == enmTarget)
        return false;

    /* If found isn't a family 06h march, whatever we're considering must be a better choice. */
    if (   enmFound < kCpumMicroarch_Intel_P6_Core_Atom_First
        || enmFound > kCpumMicroarch_Intel_P6_Core_Atom_End)
        return true;

    /*
     * The family 06h stuff is split into three categories:
     *      - Common P6 heritage
     *      - Core
     *      - Atom
     *
     * Determine which of the three arguments are Atom marchs, because that's
     * all we need to make the right choice.
     */
    bool const fConsiderAtom = enmConsider >= kCpumMicroarch_Intel_Atom_First;
    bool const fTargetAtom   = enmTarget   >= kCpumMicroarch_Intel_Atom_First;
    bool const fFoundAtom    = enmFound    >= kCpumMicroarch_Intel_Atom_First;

    /*
     * Want atom:
     */
    if (fTargetAtom)
    {
        /* Pick the atom if we've got one of each. */
        if (fConsiderAtom != fFoundAtom)
            return fConsiderAtom;
        /* If we haven't got any atoms under consideration, pick a P6 or the earlier core.
           Note! Not entirely sure Dothan is the best choice, but it'll do for now. */
        if (!fConsiderAtom)
        {
            if (enmConsider > enmFound)
                return enmConsider <= kCpumMicroarch_Intel_P6_M_Dothan;
            return enmFound > kCpumMicroarch_Intel_P6_M_Dothan;
        }
        /* else: same category, default comparison rules. */
        Assert(fConsiderAtom && fFoundAtom);
    }
    /*
     * Want non-atom:
     */
    /* Pick the non-atom if we've got one of each. */
    else if (fConsiderAtom != fFoundAtom)
        return fFoundAtom;
    /* If we've only got atoms under consideration, pick the older one just to pick something. */
    else if (fConsiderAtom)
        return enmConsider < enmFound;
    else
        Assert(!fConsiderAtom && !fFoundAtom);

    /*
     * Same basic category.  Do same compare as caller.
     */
    return cpumR3DbIsBetterMarchMatch(enmConsider, enmTarget, enmFound);
}
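
/* Worked example (added for illustration): when the target is an Atom march
   and exactly one of the two candidates is an Atom, the Atom candidate wins
   outright.  When neither is an Atom, the function prefers the newest
   candidate that is still at or below P6_M_Dothan, falling back to the older
   candidate when both lie beyond it; per the comment above, a P6-era profile
   is considered the safer stand-in for an Atom than a modern Core one. */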

#endif /* RT_ARCH_X86 || RT_ARCH_AMD64 */

int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo)
{
    CPUMDBENTRY const *pEntry = NULL;
    int                rc;

    if (!strcmp(pszName, "host"))
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    {
        /*
         * Create a CPU database entry for the host CPU.  This means getting
         * the CPUID bits from the real CPU and grabbing the closest matching
         * database entry for MSRs.
         */
        rc = CPUMR3CpuIdDetectUnknownLeafMethod(&pInfo->enmUnknownCpuIdMethod, &pInfo->DefCpuId);
        if (RT_FAILURE(rc))
            return rc;
        rc = CPUMCpuIdCollectLeavesFromX86Host(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
        if (RT_FAILURE(rc))
            return rc;
        pInfo->fMxCsrMask = CPUMR3DeterminHostMxCsrMask();

        /* Lookup database entry for MSRs. */
        CPUMCPUVENDOR const enmVendor    = CPUMCpuIdDetectX86VendorEx(pInfo->paCpuIdLeavesR3[0].uEax,
                                                                      pInfo->paCpuIdLeavesR3[0].uEbx,
                                                                      pInfo->paCpuIdLeavesR3[0].uEcx,
                                                                      pInfo->paCpuIdLeavesR3[0].uEdx);
        uint32_t      const uStd1Eax     = pInfo->paCpuIdLeavesR3[1].uEax;
        uint8_t       const uFamily      = RTX86GetCpuFamily(uStd1Eax);
        uint8_t       const uModel       = RTX86GetCpuModel(uStd1Eax, enmVendor == CPUMCPUVENDOR_INTEL);
        uint8_t       const uStepping    = RTX86GetCpuStepping(uStd1Eax);
        CPUMMICROARCH const enmMicroarch = CPUMCpuIdDetermineX86MicroarchEx(enmVendor, uFamily, uModel, uStepping);

        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
        {
            CPUMDBENTRY const *pCur = g_apCpumDbEntries[i];
            if ((CPUMCPUVENDOR)pCur->enmVendor == enmVendor)
            {
                /* Match against Family, Microarch, model and stepping.  Except
                   for family, always match the closer with preference given to
                   the later/older ones. */
                if (pCur->uFamily == uFamily)
                {
                    if (pCur->enmMicroarch == enmMicroarch)
                    {
                        if (pCur->uModel == uModel)
                        {
                            if (pCur->uStepping == uStepping)
                            {
                                /* Perfect match. */
                                pEntry = pCur;
                                break;
                            }

                            if (   !pEntry
                                || pEntry->uModel       != uModel
                                || pEntry->enmMicroarch != enmMicroarch
                                || pEntry->uFamily      != uFamily)
                                pEntry = pCur;
                            else if (  pCur->uStepping >= uStepping
                                     ? pCur->uStepping < pEntry->uStepping || pEntry->uStepping < uStepping
                                     : pCur->uStepping > pEntry->uStepping)
                                pEntry = pCur;
                        }
                        else if (   !pEntry
                                 || pEntry->enmMicroarch != enmMicroarch
                                 || pEntry->uFamily      != uFamily)
                            pEntry = pCur;
                        else if (  pCur->uModel >= uModel
                                 ? pCur->uModel < pEntry->uModel || pEntry->uModel < uModel
                                 : pCur->uModel > pEntry->uModel)
                            pEntry = pCur;
                    }
                    else if (   !pEntry
                             || pEntry->uFamily != uFamily)
                        pEntry = pCur;
                    /* Special march matching rules apply to intel family 06h. */
                    else if (     enmVendor == CPUMCPUVENDOR_INTEL
                               && uFamily   == 6
                             ? cpumR3DbIsBetterIntelFam06Match(pCur->enmMicroarch, enmMicroarch, pEntry->enmMicroarch)
                             : cpumR3DbIsBetterMarchMatch(pCur->enmMicroarch, enmMicroarch, pEntry->enmMicroarch))
                        pEntry = pCur;
                }
                /* We don't do closeness matching on family, we use the first
                   entry for the CPU vendor instead. (P4 workaround.) */
                else if (!pEntry)
                    pEntry = pCur;
            }
        }

        if (pEntry)
            LogRel(("CPUM: Matched host CPU %s %#x/%#x/%#x %s with CPU DB entry '%s' (%s %#x/%#x/%#x %s)\n",
                    CPUMCpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMMicroarchName(enmMicroarch),
                    pEntry->pszName, CPUMCpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor), pEntry->uFamily, pEntry->uModel,
                    pEntry->uStepping, CPUMMicroarchName(pEntry->enmMicroarch) ));
        else
        {
            pEntry = g_apCpumDbEntries[0];
            LogRel(("CPUM: No matching processor database entry %s %#x/%#x/%#x %s, falling back on '%s'\n",
                    CPUMCpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMMicroarchName(enmMicroarch),
                    pEntry->pszName));
        }
    }
    else
#else
        pszName = g_apCpumDbEntries[0]->pszName; /* Just pick the first entry for non-x86 hosts. */
#endif
    {
        /*
         * We're supposed to be emulating a specific CPU that is included in
         * our CPU database.  The CPUID tables need to be copied onto the
         * heap so the caller can modify them and so they can be freed like
         * in the host case above.
         */
        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
            if (!strcmp(pszName, g_apCpumDbEntries[i]->pszName))
            {
                pEntry = g_apCpumDbEntries[i];
                break;
            }
        if (!pEntry)
        {
            LogRel(("CPUM: Cannot locate any CPU by the name '%s'\n", pszName));
            return VERR_CPUM_DB_CPU_NOT_FOUND;
        }

        pInfo->cCpuIdLeaves = pEntry->cCpuIdLeaves;
        if (pEntry->cCpuIdLeaves)
        {
            /* Must allocate a multiple of 16 here, matching cpumR3CpuIdEnsureSpace. */
            size_t cbExtra = sizeof(pEntry->paCpuIdLeaves[0]) * (RT_ALIGN(pEntry->cCpuIdLeaves, 16) - pEntry->cCpuIdLeaves);
            pInfo->paCpuIdLeavesR3 = (PCPUMCPUIDLEAF)RTMemDupEx(pEntry->paCpuIdLeaves,
                                                                sizeof(pEntry->paCpuIdLeaves[0]) * pEntry->cCpuIdLeaves,
                                                                cbExtra);
            if (!pInfo->paCpuIdLeavesR3)
                return VERR_NO_MEMORY;
        }
        else
            pInfo->paCpuIdLeavesR3 = NULL;

        pInfo->enmUnknownCpuIdMethod = pEntry->enmUnknownCpuId;
        pInfo->DefCpuId              = pEntry->DefUnknownCpuId;
        pInfo->fMxCsrMask            = pEntry->fMxCsrMask;

        LogRel(("CPUM: Using CPU DB entry '%s' (%s %#x/%#x/%#x %s)\n",
                pEntry->pszName, CPUMCpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor),
                pEntry->uFamily, pEntry->uModel, pEntry->uStepping, CPUMMicroarchName(pEntry->enmMicroarch) ));
    }

    pInfo->fMsrMask           = pEntry->fMsrMask;
    pInfo->iFirstExtCpuIdLeaf = 0; /* Set by caller. */
    pInfo->uScalableBusFreq   = pEntry->uScalableBusFreq;

    /*
     * Copy the MSR range.
     */
    uint32_t        cMsrs   = 0;
    PCPUMMSRRANGE   paMsrs  = NULL;

    PCCPUMMSRRANGE  pCurMsr = pEntry->paMsrRanges;
    uint32_t        cLeft   = pEntry->cMsrRanges;
    while (cLeft-- > 0)
    {
        rc = cpumR3MsrRangesInsert(NULL /* pVM */, &paMsrs, &cMsrs, pCurMsr);
        if (RT_FAILURE(rc))
        {
            Assert(!paMsrs); /* The above function frees this. */
            RTMemFree(pInfo->paCpuIdLeavesR3);
            pInfo->paCpuIdLeavesR3 = NULL;
            return rc;
        }
        pCurMsr++;
    }

    pInfo->paMsrRangesR3 = paMsrs;
    pInfo->cMsrRanges    = cMsrs;
    return VINF_SUCCESS;
}


/**
 * Insert an MSR range into the VM.
 *
 * If the new MSR range overlaps existing ranges, the existing ones will be
 * adjusted/removed to fit in the new one.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pNewRange   Pointer to the MSR range being inserted.
 */
VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange)
{
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pNewRange, VERR_INVALID_PARAMETER);

    return cpumR3MsrRangesInsert(pVM, NULL /* ppaMsrRanges */, NULL /* pcMsrRanges */, pNewRange);
}
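
/* Illustrative sketch (added; not from the original sources): a caller
   registering a single read-as-zero, write-ignored MSR with a VM via the
   public API above.  The MSR number and name are made up for the example
   and follow the non-statistics CPUMMSRRANGE layout. */
#if 0
static int exampleRegisterMsrWithVm(PVM pVM)
{
    CPUMMSRRANGE const Range =
    {
        /*.uFirst =*/       0x00004321,
        /*.uLast =*/        0x00004321,
        /*.enmRdFn =*/      kCpumMsrRdFn_FixedValue,
        /*.enmWrFn =*/      kCpumMsrWrFn_IgnoreWrite,
        /*.offCpumCpu =*/   UINT16_MAX,
        /*.fReserved =*/    0,
        /*.uValue =*/       0,
        /*.fWrIgnMask =*/   UINT64_MAX,
        /*.fWrGpMask =*/    0,
        /*.szName = */      "EXAMPLE_MSR"
    };
    return CPUMR3MsrRangesInsert(pVM, &Range);
}
#endif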


/**
 * Register statistics for the MSRs.
 *
 * This must not be called before the MSRs have been finalized and moved to the
 * hyper heap.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int cpumR3MsrRegStats(PVM pVM)
{
    /*
     * Global statistics.
     */
    PCPUM pCpum = &pVM->cpum.s;
    STAM_REL_REG(pVM, &pCpum->cMsrReads,                STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/Reads",
                 STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
    STAM_REL_REG(pVM, &pCpum->cMsrReadsRaiseGp,         STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/ReadsRaisingGP",
                 STAMUNIT_OCCURENCES, "RDMSR raising #GPs, except unknown MSRs.");
    STAM_REL_REG(pVM, &pCpum->cMsrReadsUnknown,         STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/ReadsUnknown",
                 STAMUNIT_OCCURENCES, "RDMSR on unknown MSRs (raises #GP).");
    STAM_REL_REG(pVM, &pCpum->cMsrWrites,               STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/Writes",
                 STAMUNIT_OCCURENCES, "All WRMSRs making it to CPUM.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesRaiseGp,        STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesRaisingGP",
                 STAMUNIT_OCCURENCES, "WRMSR raising #GPs, except unknown MSRs.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesToIgnoredBits,  STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesToIgnoredBits",
                 STAMUNIT_OCCURENCES, "Writing of ignored bits.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesUnknown,        STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesUnknown",
                 STAMUNIT_OCCURENCES, "WRMSR on unknown MSRs (raises #GP).");


# ifdef VBOX_WITH_STATISTICS
    /*
     * Per range.
     */
    PCPUMMSRRANGE   paRanges = pVM->cpum.s.GuestInfo.paMsrRangesR3;
    uint32_t        cRanges  = pVM->cpum.s.GuestInfo.cMsrRanges;
    for (uint32_t i = 0; i < cRanges; i++)
    {
        char    szName[160];
        ssize_t cchName;

        if (paRanges[i].uFirst == paRanges[i].uLast)
            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%s",
                                  paRanges[i].uFirst, paRanges[i].szName);
        else
            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%#010x-%s",
                                  paRanges[i].uFirst, paRanges[i].uLast, paRanges[i].szName);

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-reads");
        STAMR3Register(pVM, &paRanges[i].cReads, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RDMSR");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-writes");
        STAMR3Register(pVM, &paRanges[i].cWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-GPs");
        STAMR3Register(pVM, &paRanges[i].cGps, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "#GPs");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-ign-bits-writes");
        STAMR3Register(pVM, &paRanges[i].cIgnoredBits, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR w/ ignored bits");
    }
# endif /* VBOX_WITH_STATISTICS */

    return VINF_SUCCESS;
}

#endif /* !CPUM_DB_STANDALONE */