VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUMR3Msr-x86.cpp@109008

Last change on this file since 109008 was 109008, checked in by vboxsync, 5 days ago

VMM,Main: Working on ARM CPU profile support, which is needed/useful for getting info about the host CPU as well. The CPUDBENTRY typedef is used externally by Main, so we can't have two definitions of it; the bits common to both x86 and ARM were left in CPUDBENTRY and sub-structures were created for each of the two targets/platforms. Also started reworking the VBoxCpuReport tool so we can use it on ARM as well (much left to do there, though). jiraref:VBP-1598

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
  • Property svn:mergeinfo set to
    /branches/VBox-3.0/src/VBox/VMM/VMMR3/CPUMR3Db.cpp58652,​70973
    /branches/VBox-3.2/src/VBox/VMM/VMMR3/CPUMR3Db.cpp66309,​66318
    /branches/VBox-4.0/src/VBox/VMM/VMMR3/CPUMR3Db.cpp70873
    /branches/VBox-4.1/src/VBox/VMM/VMMR3/CPUMR3Db.cpp74233,​78414,​78691,​81841,​82127,​85941,​85944-85947,​85949-85950,​85953,​86701,​86728,​87009
    /branches/VBox-4.2/src/VBox/VMM/VMMR3/CPUMR3Db.cpp86229-86230,​86234,​86529,​91503-91504,​91506-91508,​91510,​91514-91515,​91521,​108112,​108114,​108127
    /branches/VBox-4.3/src/VBox/VMM/VMMR3/CPUMR3Db.cpp89714,​91223,​93628-93629,​94066,​94839,​94897,​95154,​95164,​95167,​95295,​95338,​95353-95354,​95356,​95367,​95451,​95475,​95477,​95480,​95507,​95640,​95659,​95661,​95663,​98913-98914
    /branches/VBox-4.3/trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp91223
    /branches/VBox-5.0/src/VBox/VMM/VMMR3/CPUMR3Db.cpp104938,​104943,​104950,​104987-104988,​104990,​106453
    /branches/VBox-5.1/src/VBox/VMM/VMMR3/CPUMR3Db.cpp112367,​116543,​116550,​116568,​116573
    /branches/VBox-5.2/src/VBox/VMM/VMMR3/CPUMR3Db.cpp119536,​120083,​120099,​120213,​120221,​120239,​123597-123598,​123600-123601,​123755,​124263,​124273,​124277-124279,​124284-124286,​124288-124290,​125768,​125779-125780,​125812,​127158-127159,​127162-127167,​127180
    /branches/VBox-6.0/src/VBox/VMM/VMMR3/CPUMR3Db.cpp130474-130475,​130477,​130479,​131352
    /branches/VBox-6.1/src/VBox/VMM/VMMR3/CPUMR3Db.cpp141521,​141567-141568,​141588-141590,​141592-141595,​141652,​141920,​158257-158259
    /branches/VBox-7.0/src/VBox/VMM/VMMR3/CPUMR3Db.cpp156229,​156768
    /branches/aeichner/vbox-chromium-cleanup/src/VBox/VMM/VMMR3/CPUMR3Db.cpp129818-129851,​129853-129861,​129871-129872,​129876,​129880,​129882,​130013-130015,​130094-130095
    /branches/andy/draganddrop/src/VBox/VMM/VMMR3/CPUMR3Db.cpp90781-91268
    /branches/andy/guestctrl20/src/VBox/VMM/VMMR3/CPUMR3Db.cpp78916,​78930
    /branches/andy/pdmaudio/src/VBox/VMM/VMMR3/CPUMR3Db.cpp94582,​94641,​94654,​94688,​94778,​94783,​94816,​95197,​95215-95216,​95250,​95279,​95505-95506,​95543,​95694,​96323,​96470-96471,​96582,​96587,​96802-96803,​96817,​96904,​96967,​96999,​97020-97021,​97025,​97050,​97099
    /branches/bird/hardenedwindows/src/VBox/VMM/VMMR3/CPUMR3Db.cpp92692-94610
    /branches/dsen/gui/src/VBox/VMM/VMMR3/CPUMR3Db.cpp79076-79078,​79089,​79109-79110,​79112-79113,​79127-79130,​79134,​79141,​79151,​79155,​79157-79159,​79193,​79197
    /branches/dsen/gui2/src/VBox/VMM/VMMR3/CPUMR3Db.cpp79224,​79228,​79233,​79235,​79258,​79262-79263,​79273,​79341,​79345,​79354,​79357,​79387-79388,​79559-79569,​79572-79573,​79578,​79581-79582,​79590-79591,​79598-79599,​79602-79603,​79605-79606,​79632,​79635,​79637,​79644
    /branches/dsen/gui3/src/VBox/VMM/VMMR3/CPUMR3Db.cpp79645-79692
    /branches/dsen/gui4/src/VBox/VMM/VMMR3/CPUMR3Db.cpp155183-155185,​155187,​155198,​155200-155201,​155205,​155228,​155235,​155243,​155248,​155282,​155285,​155287-155288,​155311,​155316,​155336,​155342,​155344,​155437-155438,​155441,​155443,​155488,​155509-155513,​155526-155527,​155559,​155572,​155576-155577,​155592-155593
File size: 23.3 KB
/* $Id: CPUMR3Msr-x86.cpp 109008 2025-04-16 20:59:36Z vboxsync $ */
/** @file
 * CPUM - CPU database part.
 */

/*
 * Copyright (C) 2013-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/mm.h>

#include <VBox/err.h>
#if !defined(RT_ARCH_ARM64)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/mem.h>
#include <iprt/string.h>

#include "CPUMR3Msr-x86.h"


/**
 * Binary search used by cpumR3MsrRangesInsert; it has some special properties
 * with regard to mismatches.
 *
 * @returns Insert location.
 * @param   paMsrRanges     The MSR ranges to search.
 * @param   cMsrRanges      The number of MSR ranges.
 * @param   uMsr            What to search for.
 */
static uint32_t cpumR3MsrRangesBinSearch(PCCPUMMSRRANGE paMsrRanges, uint32_t cMsrRanges, uint32_t uMsr)
{
    if (!cMsrRanges)
        return 0;

    uint32_t iStart = 0;
    uint32_t iLast  = cMsrRanges - 1;
    for (;;)
    {
        uint32_t i = iStart + (iLast - iStart + 1) / 2;
        if (   uMsr >= paMsrRanges[i].uFirst
            && uMsr <= paMsrRanges[i].uLast)
            return i;
        if (uMsr < paMsrRanges[i].uFirst)
        {
            if (i <= iStart)
                return i;
            iLast = i - 1;
        }
        else
        {
            if (i >= iLast)
            {
                if (i < cMsrRanges)
                    i++;
                return i;
            }
            iStart = i + 1;
        }
    }
}
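

/*
 * Editor's illustration (not part of the original file): on a hit the search
 * returns the index of the matching range, on a miss it returns the index at
 * which a range starting at uMsr would have to be inserted.  Assuming two
 * ranges, [0] = {0x10..0x1f} and [1] = {0x40..0x4f}:
 *
 *      cpumR3MsrRangesBinSearch(paRanges, 2, 0x15) -> 0   (hit in range 0)
 *      cpumR3MsrRangesBinSearch(paRanges, 2, 0x05) -> 0   (insert before range 0)
 *      cpumR3MsrRangesBinSearch(paRanges, 2, 0x30) -> 1   (insert between 0 and 1)
 *      cpumR3MsrRangesBinSearch(paRanges, 2, 0x50) -> 2   (append after range 1)
 *
 * cpumR3MsrRangesInsert below relies on this, using the result both for
 * overlap detection and as the memmove/insertion index.
 */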


/**
 * Ensures that there is space for at least @a cNewRanges in the table,
 * reallocating the table if necessary.
 *
 * @returns Pointer to the MSR ranges on success, NULL on failure.  On failure
 *          @a *ppaMsrRanges is freed and set to NULL.
 * @param   pVM             The cross context VM structure.  If NULL,
 *                          use the process heap, otherwise the VM's hyper heap.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 * @param   cMsrRanges      The current number of ranges.
 * @param   cNewRanges      The number of ranges to be added.
 */
static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
{
    if (   cMsrRanges + cNewRanges
        >  RT_ELEMENTS(pVM->cpum.s.GuestInfo.aMsrRanges) + (pVM ? 0 : 128 /* Catch too many MSRs in CPU reporter! */))
    {
        LogRel(("CPUM: Too many MSR ranges! %#x, max %#x\n",
                cMsrRanges + cNewRanges, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aMsrRanges)));
        return NULL;
    }
    if (pVM)
    {
        Assert(cMsrRanges  == pVM->cpum.s.GuestInfo.cMsrRanges);
        Assert(*ppaMsrRanges == pVM->cpum.s.GuestInfo.aMsrRanges);
    }
    else
    {
        if (cMsrRanges + cNewRanges > RT_ALIGN_32(cMsrRanges, 16))
        {
            uint32_t const cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
            void *pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
            if (pvNew)
                *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
            else
            {
                RTMemFree(*ppaMsrRanges);
                *ppaMsrRanges = NULL;
                return NULL;
            }
        }
    }

    return *ppaMsrRanges;
}
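

/*
 * Editor's note (not part of the original file): for the process-heap case the
 * table capacity is kept at RT_ALIGN_32(cMsrRanges, 16) entries, so for example:
 *
 *      cMsrRanges = 0,  cNewRanges = 1  ->  reallocate to 16 entries
 *      cMsrRanges = 16, cNewRanges = 1  ->  reallocate to 32 entries
 *      cMsrRanges = 17, cNewRanges = 1  ->  no reallocation (18 <= 32)
 *
 * For the hyper-heap case (pVM != NULL) the fixed aMsrRanges array is used and
 * only the overflow check at the top of the function applies.
 */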


/**
 * Inserts a new MSR range into a sorted MSR range array.
 *
 * If the new MSR range overlaps existing ranges, the existing ones will be
 * adjusted or removed to make room for the new one.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS
 * @retval  VERR_NO_MEMORY
 *
 * @param   pVM             The cross context VM structure.  If NULL,
 *                          use the process heap, otherwise the VM's hyper heap.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 *                          Must be NULL if using the hyper heap.
 * @param   pcMsrRanges     The variable holding the number of ranges.  Must be
 *                          NULL if using the hyper heap.
 * @param   pNewRange       The new range.
 */
int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange)
{
    Assert(pNewRange->uLast >= pNewRange->uFirst);
    Assert(pNewRange->enmRdFn > kCpumMsrRdFn_Invalid && pNewRange->enmRdFn < kCpumMsrRdFn_End);
    Assert(pNewRange->enmWrFn > kCpumMsrWrFn_Invalid && pNewRange->enmWrFn < kCpumMsrWrFn_End);

    /*
     * Validate and use the VM's MSR ranges array if we are using the hyper heap.
     */
    if (pVM)
    {
        AssertReturn(!ppaMsrRanges, VERR_INVALID_PARAMETER);
        AssertReturn(!pcMsrRanges, VERR_INVALID_PARAMETER);
        AssertReturn(pVM->cpum.s.GuestInfo.paMsrRangesR3 == pVM->cpum.s.GuestInfo.aMsrRanges, VERR_INTERNAL_ERROR_3);

        ppaMsrRanges = &pVM->cpum.s.GuestInfo.paMsrRangesR3;
        pcMsrRanges  = &pVM->cpum.s.GuestInfo.cMsrRanges;
    }
    else
    {
        AssertReturn(ppaMsrRanges, VERR_INVALID_POINTER);
        AssertReturn(pcMsrRanges, VERR_INVALID_POINTER);
    }

    uint32_t      cMsrRanges  = *pcMsrRanges;
    PCPUMMSRRANGE paMsrRanges = *ppaMsrRanges;

    /*
     * Optimize the linear insertion case where we add new entries at the end.
     */
    if (   cMsrRanges > 0
        && paMsrRanges[cMsrRanges - 1].uLast < pNewRange->uFirst)
    {
        paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
        if (!paMsrRanges)
            return VERR_NO_MEMORY;
        paMsrRanges[cMsrRanges] = *pNewRange;
        *pcMsrRanges += 1;
    }
    else
    {
        uint32_t i = cpumR3MsrRangesBinSearch(paMsrRanges, cMsrRanges, pNewRange->uFirst);
        Assert(i == cMsrRanges || pNewRange->uFirst <= paMsrRanges[i].uLast);
        Assert(i == 0 || pNewRange->uFirst > paMsrRanges[i - 1].uLast);

        /*
         * Adding an entirely new entry?
         */
        if (   i >= cMsrRanges
            || pNewRange->uLast < paMsrRanges[i].uFirst)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
        /*
         * Replace existing entry?
         */
        else if (   pNewRange->uFirst == paMsrRanges[i].uFirst
                 && pNewRange->uLast  == paMsrRanges[i].uLast)
            paMsrRanges[i] = *pNewRange;
        /*
         * Splitting an existing entry?
         */
        else if (   pNewRange->uFirst > paMsrRanges[i].uFirst
                 && pNewRange->uLast  < paMsrRanges[i].uLast)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 2);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            Assert(i < cMsrRanges);
            memmove(&paMsrRanges[i + 2], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i + 1] = *pNewRange;
            paMsrRanges[i + 2] = paMsrRanges[i];
            paMsrRanges[i    ].uLast  = pNewRange->uFirst - 1;
            paMsrRanges[i + 2].uFirst = pNewRange->uLast + 1;
            *pcMsrRanges += 2;
        }
        /*
         * Complicated scenarios that can affect more than one range.
         *
         * The current code does not optimize memmove calls when replacing
         * one or more existing ranges, because it's tedious to deal with and
         * not expected to be a frequent usage scenario.
         */
        else
        {
            /* Adjust start of first match? */
            if (   pNewRange->uFirst <= paMsrRanges[i].uFirst
                && pNewRange->uLast  <  paMsrRanges[i].uLast)
                paMsrRanges[i].uFirst = pNewRange->uLast + 1;
            else
            {
                /* Adjust end of first match? */
                if (pNewRange->uFirst > paMsrRanges[i].uFirst)
                {
                    Assert(paMsrRanges[i].uLast >= pNewRange->uFirst);
                    paMsrRanges[i].uLast = pNewRange->uFirst - 1;
                    i++;
                }
                /* Replace the whole first match (lazy bird). */
                else
                {
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }

                /* Does the new range affect more ranges? */
                while (   i < cMsrRanges
                       && pNewRange->uLast >= paMsrRanges[i].uFirst)
                {
                    if (pNewRange->uLast < paMsrRanges[i].uLast)
                    {
                        /* Adjust the start of it, then we're done. */
                        paMsrRanges[i].uFirst = pNewRange->uLast + 1;
                        break;
                    }

                    /* Remove it entirely. */
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }
            }

            /* Now, perform a normal insertion. */
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
    }

    return VINF_SUCCESS;
}
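

/*
 * Editor's illustration (not part of the original file) of the splitting case
 * above: assume the table holds a single range 0x200..0x2ff and a new range
 * 0x250..0x25f is inserted (the MSR numbers are made up for the example):
 *
 *      before:  { 0x200..0x2ff }
 *      after:   { 0x200..0x24f }, { 0x250..0x25f }, { 0x260..0x2ff }
 *
 * The original entry is copied two slots up, the new range is placed between
 * the two copies, and their uLast/uFirst are clipped to pNewRange->uFirst - 1
 * and pNewRange->uLast + 1, so the table stays sorted and non-overlapping.
 */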


/**
 * Reconciles CPUID info with MSRs (selected ones).
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   fForceFlushCmd  Make sure MSR_IA32_FLUSH_CMD is present.
 * @param   fForceSpecCtrl  Make sure MSR_IA32_SPEC_CTRL is present.
 */
DECLHIDDEN(int) cpumR3MsrReconcileWithCpuId(PVM pVM, bool fForceFlushCmd, bool fForceSpecCtrl)
{
    PCCPUMMSRRANGE  apToAdd[10];
    uint32_t        cToAdd = 0;

    /*
     * The IA32_FLUSH_CMD MSR was introduced in microcode updates (MCUs) for
     * CVE-2018-3646 and related issues.
     */
    if (   pVM->cpum.s.GuestFeatures.fFlushCmd
        || fForceFlushCmd)
    {
        static CPUMMSRRANGE const s_FlushCmd =
        {
            /*.uFirst =*/       MSR_IA32_FLUSH_CMD,
            /*.uLast =*/        MSR_IA32_FLUSH_CMD,
            /*.enmRdFn =*/      kCpumMsrRdFn_WriteOnly,
            /*.enmWrFn =*/      kCpumMsrWrFn_Ia32FlushCmd,
            /*.offCpumCpu =*/   UINT16_MAX,
            /*.fReserved =*/    0,
            /*.uValue =*/       0,
            /*.fWrIgnMask =*/   0,
            /*.fWrGpMask =*/    ~MSR_IA32_FLUSH_CMD_F_L1D,
            /*.szName = */      "IA32_FLUSH_CMD"
        };
        apToAdd[cToAdd++] = &s_FlushCmd;
    }

    /*
     * The IA32_PRED_CMD MSR was introduced in MCUs for CVE-2018-3646 and
     * related issues.
     */
    if (   pVM->cpum.s.GuestFeatures.fIbpb
        /** @todo || pVM->cpum.s.GuestFeatures.fSbpb*/)
    {
        static CPUMMSRRANGE const s_PredCmd =
        {
            /*.uFirst =*/       MSR_IA32_PRED_CMD,
            /*.uLast =*/        MSR_IA32_PRED_CMD,
            /*.enmRdFn =*/      kCpumMsrRdFn_WriteOnly,
            /*.enmWrFn =*/      kCpumMsrWrFn_Ia32PredCmd,
            /*.offCpumCpu =*/   UINT16_MAX,
            /*.fReserved =*/    0,
            /*.uValue =*/       0,
            /*.fWrIgnMask =*/   0,
            /*.fWrGpMask =*/    ~MSR_IA32_PRED_CMD_F_IBPB,
            /*.szName = */      "IA32_PRED_CMD"
        };
        apToAdd[cToAdd++] = &s_PredCmd;
    }

    /*
     * The IA32_SPEC_CTRL MSR was introduced in MCUs for CVE-2018-3646 and
     * related issues.
     */
    if (   pVM->cpum.s.GuestFeatures.fSpecCtrlMsr
        || fForceSpecCtrl)
    {
        static CPUMMSRRANGE const s_SpecCtrl =
        {
            /*.uFirst =*/       MSR_IA32_SPEC_CTRL,
            /*.uLast =*/        MSR_IA32_SPEC_CTRL,
            /*.enmRdFn =*/      kCpumMsrRdFn_Ia32SpecCtrl,
            /*.enmWrFn =*/      kCpumMsrWrFn_Ia32SpecCtrl,
            /*.offCpumCpu =*/   UINT16_MAX,
            /*.fReserved =*/    0,
            /*.uValue =*/       0,
            /*.fWrIgnMask =*/   0,
            /*.fWrGpMask =*/    0,
            /*.szName = */      "IA32_SPEC_CTRL"
        };
        apToAdd[cToAdd++] = &s_SpecCtrl;
    }

    /*
     * MSR_IA32_ARCH_CAPABILITIES was introduced in various Spectre-related
     * MCUs, or at least it is documented in relation to them.
     */
    if (pVM->cpum.s.GuestFeatures.fArchCap)
    {
        static CPUMMSRRANGE const s_ArchCaps =
        {
            /*.uFirst =*/       MSR_IA32_ARCH_CAPABILITIES,
            /*.uLast =*/        MSR_IA32_ARCH_CAPABILITIES,
            /*.enmRdFn =*/      kCpumMsrRdFn_Ia32ArchCapabilities,
            /*.enmWrFn =*/      kCpumMsrWrFn_ReadOnly,
            /*.offCpumCpu =*/   UINT16_MAX,
            /*.fReserved =*/    0,
            /*.uValue =*/       0,
            /*.fWrIgnMask =*/   0,
            /*.fWrGpMask =*/    UINT64_MAX,
            /*.szName = */      "IA32_ARCH_CAPABILITIES"
        };
        apToAdd[cToAdd++] = &s_ArchCaps;
    }

    /*
     * Do the adding.
     */
    Assert(cToAdd <= RT_ELEMENTS(apToAdd));
    for (uint32_t i = 0; i < cToAdd; i++)
    {
        PCCPUMMSRRANGE pRange = apToAdd[i];
        Assert(pRange->uFirst == pRange->uLast);
        if (!cpumLookupMsrRange(pVM, pRange->uFirst))
        {
            LogRel(("CPUM: MSR/CPUID reconciliation insert: %#010x %s\n", pRange->uFirst, pRange->szName));
            int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3,
                                           &pVM->cpum.s.GuestInfo.cMsrRanges, pRange);
            AssertRCReturn(rc, rc);
        }
    }
    return VINF_SUCCESS;
}


/**
 * Worker for cpumR3MsrApplyFudge that applies one table.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   paRanges    Array of MSRs to fudge.
 * @param   cRanges     Number of MSRs in the array.
 */
static int cpumR3MsrApplyFudgeTable(PVM pVM, PCCPUMMSRRANGE paRanges, size_t cRanges)
{
    for (uint32_t i = 0; i < cRanges; i++)
        if (!cpumLookupMsrRange(pVM, paRanges[i].uFirst))
        {
            LogRel(("CPUM: MSR fudge: %#010x %s\n", paRanges[i].uFirst, paRanges[i].szName));
            int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
                                           &paRanges[i]);
            if (RT_FAILURE(rc))
                return rc;
        }
    return VINF_SUCCESS;
}


/**
 * Fudges the MSRs that guests are known to access in some odd cases.
 *
 * A typical example is a VM that has been moved between different hosts where
 * for instance the CPU vendor differs.
 *
 * Another example is older CPU profiles (e.g. Atom Bonnell) used for newer
 * CPUs (e.g. Atom Silvermont), where features reported through CPUID aren't
 * present in the MSRs (e.g. AMD64_TSC_AUX).
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int cpumR3MsrApplyFudge(PVM pVM)
{
    /*
     * Basic.
     */
    static CPUMMSRRANGE const s_aFudgeMsrs[] =
    {
        MFO(0x00000000, "IA32_P5_MC_ADDR",          Ia32P5McAddr),
        MFX(0x00000001, "IA32_P5_MC_TYPE",          Ia32P5McType,   Ia32P5McType,   0, 0, UINT64_MAX),
        MVO(0x00000017, "IA32_PLATFORM_ID",         0),
        MFN(0x0000001b, "IA32_APIC_BASE",           Ia32ApicBase,   Ia32ApicBase),
        MVI(0x0000008b, "BIOS_SIGN",                0),
        MFX(0x000000fe, "IA32_MTRRCAP",             Ia32MtrrCap,    ReadOnly,       0x508, 0, 0),
        MFX(0x00000179, "IA32_MCG_CAP",             Ia32McgCap,     ReadOnly,       0x005, 0, 0),
        MFX(0x0000017a, "IA32_MCG_STATUS",          Ia32McgStatus,  Ia32McgStatus,  0, ~(uint64_t)UINT32_MAX, 0),
        MFN(0x000001a0, "IA32_MISC_ENABLE",         Ia32MiscEnable, Ia32MiscEnable),
        MFN(0x000001d9, "IA32_DEBUGCTL",            Ia32DebugCtl,   Ia32DebugCtl),
        MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP",   P6LastBranchFromIp),
        MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP",     P6LastBranchToIp),
        MFO(0x000001dd, "P6_LAST_INT_FROM_IP",      P6LastIntFromIp),
        MFO(0x000001de, "P6_LAST_INT_TO_IP",        P6LastIntToIp),
        MFS(0x00000277, "IA32_PAT",                 Ia32Pat,        Ia32Pat,        Guest.msrPAT),
        MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE",       Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, ~(uint64_t)0xc07),
        MFN(0x00000400, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
    };
    int rc = cpumR3MsrApplyFudgeTable(pVM, &s_aFudgeMsrs[0], RT_ELEMENTS(s_aFudgeMsrs));
    AssertLogRelRCReturn(rc, rc);

    /*
     * XP might mistake Opterons and other newer CPUs for P4s.
     */
    if (pVM->cpum.s.GuestFeatures.uFamily >= 0xf)
    {
        static CPUMMSRRANGE const s_aP4FudgeMsrs[] =
        {
            MFX(0x0000002c, "P4_EBC_FREQUENCY_ID", IntelP4EbcFrequencyId, IntelP4EbcFrequencyId, 0xf12010f, UINT64_MAX, 0),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aP4FudgeMsrs[0], RT_ELEMENTS(s_aP4FudgeMsrs));
        AssertLogRelRCReturn(rc, rc);
    }

    if (pVM->cpum.s.GuestFeatures.fRdTscP)
    {
        static CPUMMSRRANGE const s_aRdTscPFudgeMsrs[] =
        {
            MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aRdTscPFudgeMsrs[0], RT_ELEMENTS(s_aRdTscPFudgeMsrs));
        AssertLogRelRCReturn(rc, rc);
    }

    /*
     * Windows 10 incorrectly writes to MSR_IA32_TSX_CTRL without checking
     * CPUID.ARCH_CAP(EAX=7h,ECX=0):EDX[bit 29] or the MSR feature bits in
     * MSR_IA32_ARCH_CAPABILITIES[bit 7], see @bugref{9630}.
     * Ignore writes to this MSR and return 0 on reads.
     *
     * Windows 11 24H2 incorrectly reads MSR_IA32_MCU_OPT_CTRL without
     * checking CPUID.ARCH_CAP(EAX=7h,ECX=0):EDX[bit 9] or the MSR feature
     * bits in MSR_IA32_ARCH_CAPABILITIES[bit 18], see @bugref{10794}.
     * Ignore writes to this MSR and return 0 on reads.
     */
    if (pVM->cpum.s.GuestFeatures.fArchCap)
    {
        static CPUMMSRRANGE const s_aTsxCtrl[] =
        {
            MVI(MSR_IA32_TSX_CTRL,     "IA32_TSX_CTRL",     0),
            MVI(MSR_IA32_MCU_OPT_CTRL, "IA32_MCU_OPT_CTRL", 0),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aTsxCtrl[0], RT_ELEMENTS(s_aTsxCtrl));
        AssertLogRelRCReturn(rc, rc);
    }

    return rc;
}


/**
 * Inserts an MSR range into the VM.
 *
 * If the new MSR range overlaps existing ranges, the existing ones will be
 * adjusted or removed to make room for the new one.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pNewRange   Pointer to the MSR range being inserted.
 */
VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange)
{
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pNewRange, VERR_INVALID_PARAMETER);

    return cpumR3MsrRangesInsert(pVM, NULL /* ppaMsrRanges */, NULL /* pcMsrRanges */, pNewRange);
}
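

/*
 * Editor's sketch (not part of the original file): minimal usage of the public
 * CPUMR3MsrRangesInsert API, reusing the IA32_FLUSH_CMD range layout shown in
 * cpumR3MsrReconcileWithCpuId above.  The VBOX_WITH_MSR_INSERT_EXAMPLE guard
 * and the function name are hypothetical; only the struct layout and the API
 * call are taken from this file.
 */
#ifdef VBOX_WITH_MSR_INSERT_EXAMPLE
static int cpumR3MsrInsertFlushCmdExample(PVM pVM)
{
    static CPUMMSRRANGE const s_Example =
    {
        /*.uFirst =*/       MSR_IA32_FLUSH_CMD,
        /*.uLast =*/        MSR_IA32_FLUSH_CMD,
        /*.enmRdFn =*/      kCpumMsrRdFn_WriteOnly,
        /*.enmWrFn =*/      kCpumMsrWrFn_Ia32FlushCmd,
        /*.offCpumCpu =*/   UINT16_MAX,
        /*.fReserved =*/    0,
        /*.uValue =*/       0,
        /*.fWrIgnMask =*/   0,
        /*.fWrGpMask =*/    ~MSR_IA32_FLUSH_CMD_F_L1D,
        /*.szName = */      "IA32_FLUSH_CMD"
    };
    /* With a non-NULL pVM the range goes into the VM's own table; overlapping
       ranges are adjusted or removed as described for cpumR3MsrRangesInsert. */
    return CPUMR3MsrRangesInsert(pVM, &s_Example);
}
#endif /* VBOX_WITH_MSR_INSERT_EXAMPLE */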


/**
 * Register statistics for the MSRs.
 *
 * This must not be called before the MSRs have been finalized and moved to the
 * hyper heap.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int cpumR3MsrRegStats(PVM pVM)
{
    /*
     * Global statistics.
     */
    PCPUM pCpum = &pVM->cpum.s;
    STAM_REL_REG(pVM, &pCpum->cMsrReads,                STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/Reads",
                 STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
    STAM_REL_REG(pVM, &pCpum->cMsrReadsRaiseGp,         STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/ReadsRaisingGP",
                 STAMUNIT_OCCURENCES, "RDMSR raising #GPs, except unknown MSRs.");
    STAM_REL_REG(pVM, &pCpum->cMsrReadsUnknown,         STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/ReadsUnknown",
                 STAMUNIT_OCCURENCES, "RDMSR on unknown MSRs (raises #GP).");
    STAM_REL_REG(pVM, &pCpum->cMsrWrites,               STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/Writes",
                 STAMUNIT_OCCURENCES, "All WRMSRs making it to CPUM.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesRaiseGp,        STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesRaisingGP",
                 STAMUNIT_OCCURENCES, "WRMSR raising #GPs, except unknown MSRs.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesToIgnoredBits,  STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesToIgnoredBits",
                 STAMUNIT_OCCURENCES, "Writing of ignored bits.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesUnknown,        STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesUnknown",
                 STAMUNIT_OCCURENCES, "WRMSR on unknown MSRs (raises #GP).");


#ifdef VBOX_WITH_STATISTICS
    /*
     * Per range.
     */
    PCPUMMSRRANGE paRanges = pVM->cpum.s.GuestInfo.paMsrRangesR3;
    uint32_t      cRanges  = pVM->cpum.s.GuestInfo.cMsrRanges;
    for (uint32_t i = 0; i < cRanges; i++)
    {
        char    szName[160];
        ssize_t cchName;

        if (paRanges[i].uFirst == paRanges[i].uLast)
            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%s",
                                  paRanges[i].uFirst, paRanges[i].szName);
        else
            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%#010x-%s",
                                  paRanges[i].uFirst, paRanges[i].uLast, paRanges[i].szName);

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-reads");
        STAMR3Register(pVM, &paRanges[i].cReads, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RDMSR");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-writes");
        STAMR3Register(pVM, &paRanges[i].cWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-GPs");
        STAMR3Register(pVM, &paRanges[i].cGps, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "#GPs");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-ign-bits-writes");
        STAMR3Register(pVM, &paRanges[i].cIgnoredBits, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR w/ ignored bits");
    }
#endif /* VBOX_WITH_STATISTICS */

    return VINF_SUCCESS;
}
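

/*
 * Editor's note (not part of the original file): with VBOX_WITH_STATISTICS the
 * per-range counters registered above appear under names such as
 *
 *      /CPUM/MSRs/0x00000277-IA32_PAT-reads
 *      /CPUM/MSRs/0x00000277-IA32_PAT-writes
 *      /CPUM/MSRs/0x00000277-IA32_PAT-GPs
 *      /CPUM/MSRs/0x00000277-IA32_PAT-ign-bits-writes
 *
 * i.e. "%#010x" of uFirst (plus uLast for multi-MSR ranges), the range name,
 * and one suffix per counter, exactly as built by the RTStrPrintf/RTStrCopy
 * calls above.
 */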