VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp@ 99949

Last change on this file since 99949 was 99775, checked in by vboxsync, 21 months ago

*: Mark functions as static if not used outside of a given compilation unit. Enables the compiler to optimize inlining, reduces the symbol tables, exposes unused functions and in some rare cases exposes mismatches between function declarations and definitions, but most importantly reduces the number of parfait reports for the extern-function-no-forward-declaration category. This should not result in any functional changes, bugref:3409

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 272.2 KB
/* $Id: CPUMR3CpuId.cpp 99775 2023-05-12 12:21:58Z vboxsync $ */
/** @file
 * CPUM - CPU ID part.
 */

/*
 * Copyright (C) 2013-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/ssm.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/sup.h>

#include <VBox/err.h>
#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/ctype.h>
#include <iprt/mem.h>
#include <iprt/string.h>
#include <iprt/x86-helpers.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** For sanity and to avoid wasting hyper heap on buggy config / saved state. */
#define CPUM_CPUID_MAX_LEAVES       2048


#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
/**
 * Determines the host CPU MXCSR mask.
 *
 * @returns MXCSR mask.
 */
VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void)
{
    if (   ASMHasCpuId()
        && RTX86IsValidStdRange(ASMCpuId_EAX(0))
        && ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_FXSR)
    {
        uint8_t volatile abBuf[sizeof(X86FXSTATE) + 64];
        PX86FXSTATE      pState = (PX86FXSTATE)&abBuf[64 - ((uintptr_t)&abBuf[0] & 63)];
        RT_ZERO(*pState);
        ASMFxSave(pState);
        if (pState->MXCSR_MASK == 0)
            return 0xffbf;
        return pState->MXCSR_MASK;
    }
    return 0;
}
#endif
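
/*
 * Usage sketch (illustrative only, not part of the build): how a caller might
 * apply the returned mask to a guest-supplied MXCSR value.  The 0xffbf
 * fallback above is the classic default mask (everything except DAZ, bit 6)
 * used when FXSAVE reports a zero MXCSR_MASK; the function name and call are
 * real, the helper below is invented for the example.
 */
#if 0 /* example only */
static uint32_t exampleSanitizeGuestMxCsr(uint32_t uGuestMxCsr)
{
    uint32_t const fMask = CPUMR3DeterminHostMxCsrMask(); /* 0 if CPUID/FXSR is absent. */
    return fMask ? uGuestMxCsr & fMask : uGuestMxCsr;
}
#endif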



#ifndef IN_VBOX_CPU_REPORT
/**
 * Gets a matching leaf in the CPUID leaf array, converted to a CPUMCPUID.
 *
 * @returns true if found, false if not.
 * @param paLeaves The CPUID leaves to search. This is sorted.
 * @param cLeaves The number of leaves in the array.
 * @param uLeaf The leaf to locate.
 * @param uSubLeaf The subleaf to locate. Pass 0 if no sub-leaves.
 * @param pLegacy The legacy output leaf.
 */
static bool cpumR3CpuIdGetLeafLegacy(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf,
                                     PCPUMCPUID pLegacy)
{
    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, uLeaf, uSubLeaf);
    if (pLeaf)
    {
        pLegacy->uEax = pLeaf->uEax;
        pLegacy->uEbx = pLeaf->uEbx;
        pLegacy->uEcx = pLeaf->uEcx;
        pLegacy->uEdx = pLeaf->uEdx;
        return true;
    }
    return false;
}
#endif /* IN_VBOX_CPU_REPORT */


/**
 * Inserts a CPU ID leaf, replacing any existing ones.
 *
 * When inserting a simple leaf where we already got a series of sub-leaves with
 * the same leaf number (eax), the simple leaf will replace the whole series.
 *
 * When pVM is NULL, this ASSUMES that the leaves array is still on the normal
 * host-context heap and has only been allocated/reallocated by the
 * cpumCpuIdEnsureSpace function.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure. If NULL, use
 *            the process heap, otherwise the VM's hyper heap.
 * @param ppaLeaves Pointer to the pointer to the array of sorted
 *            CPUID leaves and sub-leaves. Must be NULL if using
 *            the hyper heap.
 * @param pcLeaves Where we keep the leaf count for *ppaLeaves. Must
 *            be NULL if using the hyper heap.
 * @param pNewLeaf Pointer to the data of the new leaf we're about to
 *            insert.
 */
static int cpumR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf)
{
    /*
     * Validate input parameters if we are using the hyper heap and use the VM's CPUID arrays.
     */
    if (pVM)
    {
        AssertReturn(!ppaLeaves, VERR_INVALID_PARAMETER);
        AssertReturn(!pcLeaves, VERR_INVALID_PARAMETER);
        AssertReturn(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3 == pVM->cpum.s.GuestInfo.aCpuIdLeaves, VERR_INVALID_PARAMETER);

        ppaLeaves = &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
        pcLeaves  = &pVM->cpum.s.GuestInfo.cCpuIdLeaves;
    }

    PCPUMCPUIDLEAF paLeaves = *ppaLeaves;
    uint32_t       cLeaves  = *pcLeaves;

    /*
     * Validate the new leaf a little.
     */
    AssertLogRelMsgReturn(!(pNewLeaf->fFlags & ~CPUMCPUIDLEAF_F_VALID_MASK),
                          ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fFlags),
                          VERR_INVALID_FLAGS);
    AssertLogRelMsgReturn(pNewLeaf->fSubLeafMask != 0 || pNewLeaf->uSubLeaf == 0,
                          ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
                          VERR_INVALID_PARAMETER);
    AssertLogRelMsgReturn(RT_IS_POWER_OF_TWO(pNewLeaf->fSubLeafMask + 1),
                          ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
                          VERR_INVALID_PARAMETER);
    AssertLogRelMsgReturn((pNewLeaf->fSubLeafMask & pNewLeaf->uSubLeaf) == pNewLeaf->uSubLeaf,
                          ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
                          VERR_INVALID_PARAMETER);

    /*
     * Find insertion point. The lazy bird uses the same excuse as in
     * cpumCpuIdGetLeaf(), but optimizes for linear insertion (saved state).
     */
    uint32_t i;
    if (   cLeaves > 0
        && paLeaves[cLeaves - 1].uLeaf < pNewLeaf->uLeaf)
    {
        /* Add at end. */
        i = cLeaves;
    }
    else if (   cLeaves > 0
             && paLeaves[cLeaves - 1].uLeaf == pNewLeaf->uLeaf)
    {
        /* Either replacing the last leaf or dealing with sub-leaves. Spool
           back to the first sub-leaf to pretend we did the linear search. */
        i = cLeaves - 1;
        while (   i > 0
               && paLeaves[i - 1].uLeaf == pNewLeaf->uLeaf)
            i--;
    }
    else
    {
        /* Linear search from the start. */
        i = 0;
        while (   i < cLeaves
               && paLeaves[i].uLeaf < pNewLeaf->uLeaf)
            i++;
    }
    if (   i < cLeaves
        && paLeaves[i].uLeaf == pNewLeaf->uLeaf)
    {
        if (paLeaves[i].fSubLeafMask != pNewLeaf->fSubLeafMask)
        {
            /*
             * The sub-leaf mask differs, replace all existing leaves with the
             * same leaf number.
             */
            uint32_t c = 1;
            while (   i + c < cLeaves
                   && paLeaves[i + c].uLeaf == pNewLeaf->uLeaf)
                c++;
            if (c > 1 && i + c < cLeaves)
            {
                memmove(&paLeaves[i + c], &paLeaves[i + 1], (cLeaves - i - c) * sizeof(paLeaves[0]));
                *pcLeaves = cLeaves -= c - 1;
            }

            paLeaves[i] = *pNewLeaf;
#ifdef VBOX_STRICT
            cpumCpuIdAssertOrder(*ppaLeaves, *pcLeaves);
#endif
            return VINF_SUCCESS;
        }

        /* Find sub-leaf insertion point. */
        while (   i < cLeaves
               && paLeaves[i].uSubLeaf < pNewLeaf->uSubLeaf
               && paLeaves[i].uLeaf == pNewLeaf->uLeaf)
            i++;

        /*
         * If we've got an exactly matching leaf, replace it.
         */
        if (   i < cLeaves
            && paLeaves[i].uLeaf == pNewLeaf->uLeaf
            && paLeaves[i].uSubLeaf == pNewLeaf->uSubLeaf)
        {
            paLeaves[i] = *pNewLeaf;
#ifdef VBOX_STRICT
            cpumCpuIdAssertOrder(*ppaLeaves, *pcLeaves);
#endif
            return VINF_SUCCESS;
        }
    }

    /*
     * Adding a new leaf at 'i'.
     */
    AssertLogRelReturn(cLeaves < CPUM_CPUID_MAX_LEAVES, VERR_TOO_MANY_CPUID_LEAVES);
    paLeaves = cpumCpuIdEnsureSpace(pVM, ppaLeaves, cLeaves);
    if (!paLeaves)
        return VERR_NO_MEMORY;

    if (i < cLeaves)
        memmove(&paLeaves[i + 1], &paLeaves[i], (cLeaves - i) * sizeof(paLeaves[0]));
    *pcLeaves += 1;
    paLeaves[i] = *pNewLeaf;

#ifdef VBOX_STRICT
    cpumCpuIdAssertOrder(*ppaLeaves, *pcLeaves);
#endif
    return VINF_SUCCESS;
}
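
/*
 * Illustration (inputs assumed, not from the source): inserting a simple leaf
 * 0x07 (fSubLeafMask = 0) into a sorted array that already holds sub-leaves
 * 0x07/0, 0x07/1 and 0x07/2 takes the "sub-leaf mask differs" path above, so
 * the whole series collapses into the single new leaf:
 *
 *      before: 0x06/0  0x07/0  0x07/1  0x07/2  0x0b/0
 *      after:  0x06/0  0x07/0                  0x0b/0
 */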


#ifndef IN_VBOX_CPU_REPORT
/**
 * Removes a range of CPUID leaves.
 *
 * This will not reallocate the array.
 *
 * @param paLeaves The array of sorted CPUID leaves and sub-leaves.
 * @param pcLeaves Where we keep the leaf count for @a paLeaves.
 * @param uFirst The first leaf.
 * @param uLast The last leaf.
 */
static void cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast)
{
    uint32_t cLeaves = *pcLeaves;

    Assert(uFirst <= uLast);

    /*
     * Find the first one.
     */
    uint32_t iFirst = 0;
    while (   iFirst < cLeaves
           && paLeaves[iFirst].uLeaf < uFirst)
        iFirst++;

    /*
     * Find the end (last + 1).
     */
    uint32_t iEnd = iFirst;
    while (   iEnd < cLeaves
           && paLeaves[iEnd].uLeaf <= uLast)
        iEnd++;

    /*
     * Adjust the array if anything needs removing.
     */
    if (iFirst < iEnd)
    {
        if (iEnd < cLeaves)
            memmove(&paLeaves[iFirst], &paLeaves[iEnd], (cLeaves - iEnd) * sizeof(paLeaves[0]));
        *pcLeaves = cLeaves -= (iEnd - iFirst);
    }

# ifdef VBOX_STRICT
    cpumCpuIdAssertOrder(paLeaves, *pcLeaves);
# endif
}
#endif /* IN_VBOX_CPU_REPORT */


/**
 * Gets a CPU ID leaf.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param pLeaf Where to store the found leaf.
 * @param uLeaf The leaf to locate.
 * @param uSubLeaf The subleaf to locate. Pass 0 if no sub-leaves.
 */
VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf)
{
    PCPUMCPUIDLEAF pcLeaf = cpumCpuIdGetLeafInt(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
                                                uLeaf, uSubLeaf);
    if (pcLeaf)
    {
        memcpy(pLeaf, pcLeaf, sizeof(*pLeaf));
        return VINF_SUCCESS;
    }

    return VERR_NOT_FOUND;
}


/**
 * Gets all the leaves.
 *
 * This only works after the CPUID leaves have been initialized. The interface
 * is intended for NEM and configuring CPUID leaves for the native hypervisor.
 *
 * @returns Pointer to the array of leaves. NULL on failure.
 * @param pVM The cross context VM structure.
 * @param pcLeaves Where to return the number of leaves.
 */
VMMR3_INT_DECL(PCCPUMCPUIDLEAF) CPUMR3CpuIdGetPtr(PVM pVM, uint32_t *pcLeaves)
{
    *pcLeaves = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
    return pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
}


/**
 * Inserts a CPU ID leaf, replacing any existing ones.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param pNewLeaf Pointer to the leaf being inserted.
 */
VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf)
{
    /*
     * Validate parameters.
     */
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pNewLeaf, VERR_INVALID_PARAMETER);

    /*
     * Disallow replacing CPU ID leaves that this API currently cannot manage.
     * These leaves have dependencies on saved-states, see PATMCpuidReplacement().
     * If you want to modify these leaves, use CPUMSetGuestCpuIdFeature().
     */
    if (   pNewLeaf->uLeaf == UINT32_C(0x00000000) /* Standard */
        || pNewLeaf->uLeaf == UINT32_C(0x00000001)
        || pNewLeaf->uLeaf == UINT32_C(0x80000000) /* Extended */
        || pNewLeaf->uLeaf == UINT32_C(0x80000001)
        || pNewLeaf->uLeaf == UINT32_C(0xc0000000) /* Centaur */
        || pNewLeaf->uLeaf == UINT32_C(0xc0000001) )
    {
        return VERR_NOT_SUPPORTED;
    }

    return cpumR3CpuIdInsert(pVM, NULL /* ppaLeaves */, NULL /* pcLeaves */, pNewLeaf);
}
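
/*
 * Usage sketch (illustrative only, not part of the build): a read-modify-write
 * of a guest CPUID leaf via the two public APIs above.  The leaf number and
 * the ECX mask are invented example values; note the low standard/extended/
 * Centaur leaves listed above would be rejected with VERR_NOT_SUPPORTED.
 */
#if 0 /* example only */
static int exampleTweakLeaf(PVM pVM)
{
    CPUMCPUIDLEAF Leaf;
    int rc = CPUMR3CpuIdGetLeaf(pVM, &Leaf, UINT32_C(0x80000008), 0 /* uSubLeaf */);
    if (RT_SUCCESS(rc))
    {
        Leaf.uEcx &= ~UINT32_C(0x0000f000);   /* hypothetical adjustment */
        rc = CPUMR3CpuIdInsert(pVM, &Leaf);   /* replaces the existing leaf */
    }
    return rc;
}
#endif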


#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
/**
 * Determines the method the CPU uses to handle unknown CPUID leaves.
 *
 * @returns VBox status code.
 * @param penmUnknownMethod Where to return the method.
 * @param pDefUnknown Where to return default unknown values. This
 *            will be set, even if the resulting method
 *            doesn't actually need it.
 */
VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown)
{
    uint32_t uLastStd = ASMCpuId_EAX(0);
    uint32_t uLastExt = ASMCpuId_EAX(0x80000000);
    if (!RTX86IsValidExtRange(uLastExt))
        uLastExt = 0x80000000;

    uint32_t auChecks[] =
    {
        uLastStd + 1,
        uLastStd + 5,
        uLastStd + 8,
        uLastStd + 32,
        uLastStd + 251,
        uLastExt + 1,
        uLastExt + 8,
        uLastExt + 15,
        uLastExt + 63,
        uLastExt + 255,
        0x7fbbffcc,
        0x833f7872,
        0xefff2353,
        0x35779456,
        0x1ef6d33e,
    };

    static const uint32_t s_auValues[] =
    {
        0xa95d2156,
        0x00000001,
        0x00000002,
        0x00000008,
        0x00000000,
        0x55773399,
        0x93401769,
        0x12039587,
    };

    /*
     * Simple method, all zeros.
     */
    *penmUnknownMethod = CPUMUNKNOWNCPUID_DEFAULTS;
    pDefUnknown->uEax = 0;
    pDefUnknown->uEbx = 0;
    pDefUnknown->uEcx = 0;
    pDefUnknown->uEdx = 0;

    /*
     * Intel has been observed returning the last standard leaf.
     */
    uint32_t auLast[4];
    ASMCpuIdExSlow(uLastStd, 0, 0, 0, &auLast[0], &auLast[1], &auLast[2], &auLast[3]);

    uint32_t cChecks = RT_ELEMENTS(auChecks);
    while (cChecks > 0)
    {
        uint32_t auCur[4];
        ASMCpuIdExSlow(auChecks[cChecks - 1], 0, 0, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
        if (memcmp(auCur, auLast, sizeof(auCur)))
            break;
        cChecks--;
    }
    if (cChecks == 0)
    {
        /* Now, what happens when the input changes? Esp. ECX. */
        uint32_t cTotal       = 0;
        uint32_t cSame        = 0;
        uint32_t cLastWithEcx = 0;
        uint32_t cNeither     = 0;
        uint32_t cValues      = RT_ELEMENTS(s_auValues);
        while (cValues > 0)
        {
            uint32_t uValue = s_auValues[cValues - 1];
            uint32_t auLastWithEcx[4];
            ASMCpuIdExSlow(uLastStd, uValue, uValue, uValue,
                           &auLastWithEcx[0], &auLastWithEcx[1], &auLastWithEcx[2], &auLastWithEcx[3]);

            cChecks = RT_ELEMENTS(auChecks);
            while (cChecks > 0)
            {
                uint32_t auCur[4];
                ASMCpuIdExSlow(auChecks[cChecks - 1], uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
                if (!memcmp(auCur, auLast, sizeof(auCur)))
                {
                    cSame++;
                    if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
                        cLastWithEcx++;
                }
                else if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
                    cLastWithEcx++;
                else
                    cNeither++;
                cTotal++;
                cChecks--;
            }
            cValues--;
        }

        Log(("CPUM: cNeither=%d cSame=%d cLastWithEcx=%d cTotal=%d\n", cNeither, cSame, cLastWithEcx, cTotal));
        if (cSame == cTotal)
            *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF;
        else if (cLastWithEcx == cTotal)
            *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX;
        else
            *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF;
        pDefUnknown->uEax = auLast[0];
        pDefUnknown->uEbx = auLast[1];
        pDefUnknown->uEcx = auLast[2];
        pDefUnknown->uEdx = auLast[3];
        return VINF_SUCCESS;
    }

    /*
     * Unchanged register values?
     */
    cChecks = RT_ELEMENTS(auChecks);
    while (cChecks > 0)
    {
        uint32_t const uLeaf   = auChecks[cChecks - 1];
        uint32_t       cValues = RT_ELEMENTS(s_auValues);
        while (cValues > 0)
        {
            uint32_t uValue = s_auValues[cValues - 1];
            uint32_t auCur[4];
            ASMCpuIdExSlow(uLeaf, uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
            if (   auCur[0] != uLeaf
                || auCur[1] != uValue
                || auCur[2] != uValue
                || auCur[3] != uValue)
                break;
            cValues--;
        }
        if (cValues != 0)
            break;
        cChecks--;
    }
    if (cChecks == 0)
    {
        *penmUnknownMethod = CPUMUNKNOWNCPUID_PASSTHRU;
        return VINF_SUCCESS;
    }

    /*
     * Just go with the simple method.
     */
    return VINF_SUCCESS;
}
#endif /* RT_ARCH_X86 || RT_ARCH_AMD64 */
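
/*
 * Usage sketch (illustrative only, not part of the build): probing the host
 * and logging the detected method via the name translator that follows.  The
 * two API calls are real; the wrapper function is invented for the example.
 */
#if 0 /* example only */
static void exampleLogUnknownLeafMethod(void)
{
    CPUMUNKNOWNCPUID enmMethod = CPUMUNKNOWNCPUID_INVALID;
    CPUMCPUID        DefUnknown;
    if (RT_SUCCESS(CPUMR3CpuIdDetectUnknownLeafMethod(&enmMethod, &DefUnknown)))
        LogRel(("Unknown-leaf method: %s (default eax=%#x)\n",
                CPUMR3CpuIdUnknownLeafMethodName(enmMethod), DefUnknown.uEax));
}
#endif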


/**
 * Translates an unknown CPUID leaf method into the constant name (sans prefix).
 *
 * @returns Read only name string.
 * @param enmUnknownMethod The method to translate.
 */
VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod)
{
    switch (enmUnknownMethod)
    {
        case CPUMUNKNOWNCPUID_DEFAULTS:                  return "DEFAULTS";
        case CPUMUNKNOWNCPUID_LAST_STD_LEAF:             return "LAST_STD_LEAF";
        case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX:    return "LAST_STD_LEAF_WITH_ECX";
        case CPUMUNKNOWNCPUID_PASSTHRU:                  return "PASSTHRU";

        case CPUMUNKNOWNCPUID_INVALID:
        case CPUMUNKNOWNCPUID_END:
        case CPUMUNKNOWNCPUID_32BIT_HACK:
            break;
    }
    return "Invalid-unknown-CPUID-method";
}


/*
 *
 * Init related code.
 * Init related code.
 * Init related code.
 *
 *
 */
#ifndef IN_VBOX_CPU_REPORT


/**
 * Gets an exactly matching leaf + sub-leaf in the CPUID leaf array.
 *
 * This ignores the fSubLeafMask.
 *
 * @returns Pointer to the matching leaf, or NULL if not found.
 * @param pCpum The CPUM instance data.
 * @param uLeaf The leaf to locate.
 * @param uSubLeaf The subleaf to locate.
 */
static PCPUMCPUIDLEAF cpumR3CpuIdGetExactLeaf(PCPUM pCpum, uint32_t uLeaf, uint32_t uSubLeaf)
{
    uint64_t       uNeedle  = RT_MAKE_U64(uSubLeaf, uLeaf);
    PCPUMCPUIDLEAF paLeaves = pCpum->GuestInfo.paCpuIdLeavesR3;
    uint32_t       iEnd     = pCpum->GuestInfo.cCpuIdLeaves;
    if (iEnd)
    {
        uint32_t iBegin = 0;
        for (;;)
        {
            uint32_t const i    = (iEnd - iBegin) / 2 + iBegin;
            uint64_t const uCur = RT_MAKE_U64(paLeaves[i].uSubLeaf, paLeaves[i].uLeaf);
            if (uNeedle < uCur)
            {
                if (i > iBegin)
                    iEnd = i;
                else
                    break;
            }
            else if (uNeedle > uCur)
            {
                if (i + 1 < iEnd)
                    iBegin = i + 1;
                else
                    break;
            }
            else
                return &paLeaves[i];
        }
    }
    return NULL;
}
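
/*
 * Note on the search key: RT_MAKE_U64(uSubLeaf, uLeaf) puts the leaf number in
 * the high dword and the sub-leaf in the low dword, so e.g. leaf 0x07/1 (key
 * 0x0000000700000001) sorts after 0x07/0 and before 0x0b/0.  That matches the
 * leaf-then-sub-leaf ordering which cpumCpuIdAssertOrder() checks in strict
 * builds, making the binary search above valid.
 */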


/**
 * Loads MSR range overrides.
 *
 * This must be called before the MSR ranges are moved from the normal heap to
 * the hyper heap!
 *
 * @returns VBox status code (VMSetError called).
 * @param pVM The cross context VM structure.
 * @param pMsrNode The CFGM node with the MSR overrides.
 */
static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode)
{
    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode))
    {
        /*
         * Assemble a valid MSR range.
         */
        CPUMMSRRANGE MsrRange;
        MsrRange.offCpumCpu = 0;
        MsrRange.fReserved  = 0;

        int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName));
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc);

        rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n",
                              MsrRange.szName, rc);

        rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n",
                              MsrRange.szName, rc);

        char szType[32];
        rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue");
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n",
                              MsrRange.szName, rc);
        if (!RTStrICmp(szType, "FixedValue"))
        {
            MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue;
            MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite;

            rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uValue, 0);
            if (RT_FAILURE(rc))
                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n",
                                  MsrRange.szName, rc);

            rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0);
            if (RT_FAILURE(rc))
                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n",
                                  MsrRange.szName, rc);

            rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0);
            if (RT_FAILURE(rc))
                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n",
                                  MsrRange.szName, rc);
        }
        else
            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
                              "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType);

        /*
         * Insert the range into the table (replaces/splits/shrinks existing
         * MSR ranges).
         */
        rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
                                   &MsrRange);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc);
    }

    return VINF_SUCCESS;
}
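
/*
 * Configuration sketch (hedged example; the exact CFGM path depends on how the
 * caller wires up pMsrNode, and all values besides the key names queried above
 * are invented): a fixed-value MSR override.
 *
 *      .../MSRs/MyFakeMsr/First     0x00000123
 *      .../MSRs/MyFakeMsr/Last      0x00000123    (defaults to First)
 *      .../MSRs/MyFakeMsr/Type      "FixedValue"  (the only type handled here)
 *      .../MSRs/MyFakeMsr/Value     0x1
 *      .../MSRs/MyFakeMsr/WrGpMask  0x0           (no write bits trigger #GP)
 */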


/**
 * Loads CPUID leaf overrides.
 *
 * This must be called before the CPUID leaves are moved from the normal
 * heap to the hyper heap!
 *
 * @returns VBox status code (VMSetError called).
 * @param pVM The cross context VM structure.
 * @param pParentNode The CFGM node with the CPUID leaves.
 * @param pszLabel How to label the overrides we're loading.
 */
static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel)
{
    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode))
    {
        /*
         * Get the leaf and subleaf numbers.
         */
        char szName[128];
        int rc = CFGMR3GetName(pNode, szName, sizeof(szName));
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc);

        /* The leaf number is either specified directly or through the node name. */
        uint32_t uLeaf;
        rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf);
        if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        {
            rc = RTStrToUInt32Full(szName, 16, &uLeaf);
            if (rc != VINF_SUCCESS)
                return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS,
                                  "Invalid %s entry: Invalid leaf number: '%s' \n", pszLabel, szName);
        }
        else if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n",
                              pszLabel, szName, rc);

        uint32_t uSubLeaf;
        rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n",
                              pszLabel, szName, rc);

        uint32_t fSubLeafMask;
        rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n",
                              pszLabel, szName, rc);

        /*
         * Look up the specified leaf, since the output register values
         * default to any existing values. This allows overriding a single
         * register, without needing to know the other values.
         */
        PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, uLeaf, uSubLeaf);
        CPUMCPUIDLEAF Leaf;
        if (pLeaf)
            Leaf = *pLeaf;
        else
            RT_ZERO(Leaf);
        Leaf.uLeaf        = uLeaf;
        Leaf.uSubLeaf     = uSubLeaf;
        Leaf.fSubLeafMask = fSubLeafMask;

        rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n",
                              pszLabel, szName, rc);
        rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n",
                              pszLabel, szName, rc);
        rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n",
                              pszLabel, szName, rc);
        rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n",
                              pszLabel, szName, rc);

        /*
         * Insert the leaf into the table (replaces existing ones).
         */
        rc = cpumR3CpuIdInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves,
                               &Leaf);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc);
    }

    return VINF_SUCCESS;
}
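
/*
 * Configuration sketch (values invented): a CPUID override under the
 * /CPUM/HostCPUID/ or /CPUM/CPUID/ nodes mentioned below, using the keys
 * queried above.  The node name doubles as the (hex) leaf number when no
 * "Leaf" value is given, and unspecified registers keep whatever the existing
 * leaf holds.
 *
 *      CPUID/80000001/Leaf         0x80000001   (optional, else from the name)
 *      CPUID/80000001/SubLeaf      0x0          (default 0)
 *      CPUID/80000001/SubLeafMask  0x0          (default 0)
 *      CPUID/80000001/edx          0x12345678   (eax/ebx/ecx left untouched)
 */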



/**
 * Fetches overrides for a CPUID leaf.
 *
 * @returns VBox status code.
 * @param pLeaf The leaf to load the overrides into.
 * @param pCfgNode The CFGM node containing the overrides
 *            (/CPUM/HostCPUID/ or /CPUM/CPUID/).
 * @param iLeaf The CPUID leaf number.
 */
static int cpumR3CpuIdFetchLeafOverride(PCPUMCPUID pLeaf, PCFGMNODE pCfgNode, uint32_t iLeaf)
{
    PCFGMNODE pLeafNode = CFGMR3GetChildF(pCfgNode, "%RX32", iLeaf);
    if (pLeafNode)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->uEax = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->uEbx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->uEcx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "edx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->uEdx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

    }
    return VINF_SUCCESS;
}


/**
 * Load the overrides for a set of CPUID leaves.
 *
 * @returns VBox status code.
 * @param paLeaves The leaf array.
 * @param cLeaves The number of leaves.
 * @param uStart The start leaf number.
 * @param pCfgNode The CFGM node containing the overrides
 *            (/CPUM/HostCPUID/ or /CPUM/CPUID/).
 */
static int cpumR3CpuIdInitLoadOverrideSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
{
    for (uint32_t i = 0; i < cLeaves; i++)
    {
        int rc = cpumR3CpuIdFetchLeafOverride(&paLeaves[i], pCfgNode, uStart + i);
        if (RT_FAILURE(rc))
            return rc;
    }

    return VINF_SUCCESS;
}


/**
 * Installs the CPUID leaves and explodes the data into structures like
 * GuestFeatures and CPUMCTX::aoffXState.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param pCpum The CPUM part of @a VM.
 * @param paLeaves The leaves. These will be copied (but not freed).
 * @param cLeaves The number of leaves.
 * @param pMsrs The MSRs.
 */
static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCpum, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
{
# ifdef VBOX_STRICT
    cpumCpuIdAssertOrder(paLeaves, cLeaves);
# endif

    /*
     * Install the CPUID information.
     */
    AssertLogRelMsgReturn(cLeaves <= RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves),
                          ("cLeaves=%u - max %u\n", cLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves)),
                          VERR_CPUM_IPE_1); /** @todo better status! */
    if (paLeaves != pCpum->GuestInfo.aCpuIdLeaves)
        memcpy(pCpum->GuestInfo.aCpuIdLeaves, paLeaves, cLeaves * sizeof(paLeaves[0]));
    pCpum->GuestInfo.paCpuIdLeavesR3 = pCpum->GuestInfo.aCpuIdLeaves;
    pCpum->GuestInfo.cCpuIdLeaves    = cLeaves;

    /*
     * Update the default CPUID leaf if necessary.
     */
    switch (pCpum->GuestInfo.enmUnknownCpuIdMethod)
    {
        case CPUMUNKNOWNCPUID_LAST_STD_LEAF:
        case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX:
        {
            /* We don't use CPUID(0).eax here because of the NT hack that only
               changes that value without actually removing any leaves. */
            uint32_t i = 0;
            if (   pCpum->GuestInfo.cCpuIdLeaves > 0
                && pCpum->GuestInfo.paCpuIdLeavesR3[0].uLeaf <= UINT32_C(0xff))
            {
                while (   i + 1 < pCpum->GuestInfo.cCpuIdLeaves
                       && pCpum->GuestInfo.paCpuIdLeavesR3[i + 1].uLeaf <= UINT32_C(0xff))
                    i++;
                pCpum->GuestInfo.DefCpuId.uEax = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEax;
                pCpum->GuestInfo.DefCpuId.uEbx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEbx;
                pCpum->GuestInfo.DefCpuId.uEcx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEcx;
                pCpum->GuestInfo.DefCpuId.uEdx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEdx;
            }
            break;
        }
        default:
            break;
    }

    /*
     * Explode the guest CPU features.
     */
    int rc = cpumCpuIdExplodeFeaturesX86(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, pMsrs,
                                         &pCpum->GuestFeatures);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Adjust the scalable bus frequency according to the CPUID information
     * we're now using.
     */
    if (CPUMMICROARCH_IS_INTEL_CORE7(pVM->cpum.s.GuestFeatures.enmMicroarch))
        pCpum->GuestInfo.uScalableBusFreq = pCpum->GuestFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
                                          ? UINT64_C(100000000)  /* 100MHz */
                                          : UINT64_C(133333333); /* 133MHz */

    /*
     * Populate the legacy arrays. Currently used for everything, later only
     * for patch manager.
     */
    struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] =
    {
        { pCpum->aGuestCpuIdPatmStd,     RT_ELEMENTS(pCpum->aGuestCpuIdPatmStd),     0x00000000 },
        { pCpum->aGuestCpuIdPatmExt,     RT_ELEMENTS(pCpum->aGuestCpuIdPatmExt),     0x80000000 },
        { pCpum->aGuestCpuIdPatmCentaur, RT_ELEMENTS(pCpum->aGuestCpuIdPatmCentaur), 0xc0000000 },
    };
    for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++)
    {
        uint32_t   cLeft       = aOldRanges[i].cCpuIds;
        uint32_t   uLeaf       = aOldRanges[i].uBase + cLeft;
        PCPUMCPUID pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft];
        while (cLeft-- > 0)
        {
            uLeaf--;
            pLegacyLeaf--;

            PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(pCpum, uLeaf, 0 /* uSubLeaf */);
            if (pLeaf)
            {
                pLegacyLeaf->uEax = pLeaf->uEax;
                pLegacyLeaf->uEbx = pLeaf->uEbx;
                pLegacyLeaf->uEcx = pLeaf->uEcx;
                pLegacyLeaf->uEdx = pLeaf->uEdx;
            }
            else
                *pLegacyLeaf = pCpum->GuestInfo.DefCpuId;
        }
    }

    /*
     * Configure XSAVE offsets according to the CPUID info and set the feature flags.
     */
    PVMCPU pVCpu0 = pVM->apCpusR3[0];
    AssertCompile(sizeof(pVCpu0->cpum.s.Guest.abXState) == CPUM_MAX_XSAVE_AREA_SIZE);
    memset(&pVCpu0->cpum.s.Guest.aoffXState[0], 0xff, sizeof(pVCpu0->cpum.s.Guest.aoffXState));
    pVCpu0->cpum.s.Guest.aoffXState[XSAVE_C_X87_BIT] = 0;
    pVCpu0->cpum.s.Guest.aoffXState[XSAVE_C_SSE_BIT] = 0;
    for (uint32_t iComponent = XSAVE_C_SSE_BIT + 1; iComponent < 63; iComponent++)
        if (pCpum->fXStateGuestMask & RT_BIT_64(iComponent))
        {
            PCPUMCPUIDLEAF pSubLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0xd, iComponent);
            AssertLogRelMsgReturn(pSubLeaf, ("iComponent=%#x\n", iComponent), VERR_CPUM_IPE_1);
            AssertLogRelMsgReturn(pSubLeaf->fSubLeafMask >= iComponent, ("iComponent=%#x\n", iComponent), VERR_CPUM_IPE_1);
            AssertLogRelMsgReturn(   pSubLeaf->uEax > 0
                                  && pSubLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE
                                  && pSubLeaf->uEax <= pCpum->GuestFeatures.cbMaxExtendedState
                                  && pSubLeaf->uEbx <= pCpum->GuestFeatures.cbMaxExtendedState
                                  && pSubLeaf->uEbx + pSubLeaf->uEax <= pCpum->GuestFeatures.cbMaxExtendedState,
                                  ("iComponent=%#x eax=%#x ebx=%#x cbMax=%#x\n", iComponent, pSubLeaf->uEax, pSubLeaf->uEbx,
                                   pCpum->GuestFeatures.cbMaxExtendedState),
                                  VERR_CPUM_IPE_1);
            pVCpu0->cpum.s.Guest.aoffXState[iComponent] = pSubLeaf->uEbx;
        }

    /* Copy the CPU #0 data to the other CPUs. */
    for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        memcpy(&pVCpu->cpum.s.Guest.aoffXState[0], &pVCpu0->cpum.s.Guest.aoffXState[0], sizeof(pVCpu0->cpum.s.Guest.aoffXState));
    }

    return VINF_SUCCESS;
}
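
/*
 * Worked illustration of the aoffXState setup above (the numbers are assumed,
 * matching typical Intel layouts rather than taken from this source): for the
 * AVX component, CPUID(0xd).EBX for sub-leaf 2 usually reports 0x240 (512-byte
 * legacy FXSAVE area + 64-byte XSAVE header), so a guest with that component
 * in fXStateGuestMask would get aoffXState[2] = 0x240, i.e. the YMM high
 * halves live at byte 0x240 of CPUMCTX::abXState.  x87 and SSE state always
 * sit at offset 0 inside the legacy area.
 */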


/** @name Instruction Set Extension Options
 * @{ */
/** Configuration option type (extended boolean, really). */
typedef uint8_t CPUMISAEXTCFG;
/** Always disable the extension. */
#define CPUMISAEXTCFG_DISABLED              false
/** Enable the extension if it's supported by the host CPU. */
#define CPUMISAEXTCFG_ENABLED_SUPPORTED     true
/** Enable the extension if it's supported by the host CPU, but don't let
 * the portable CPUID feature disable it. */
#define CPUMISAEXTCFG_ENABLED_PORTABLE      UINT8_C(127)
/** Always enable the extension. */
#define CPUMISAEXTCFG_ENABLED_ALWAYS        UINT8_C(255)
/** @} */

/**
 * CPUID Configuration (from CFGM).
 *
 * @remarks The members aren't documented since we would only be duplicating the
 *          \@cfgm entries in cpumR3CpuIdReadConfig.
 */
typedef struct CPUMCPUIDCONFIG
{
    bool            fNt4LeafLimit;
    bool            fInvariantTsc;
    bool            fForceVme;
    bool            fNestedHWVirt;

    CPUMISAEXTCFG   enmCmpXchg16b;
    CPUMISAEXTCFG   enmMonitor;
    CPUMISAEXTCFG   enmMWaitExtensions;
    CPUMISAEXTCFG   enmSse41;
    CPUMISAEXTCFG   enmSse42;
    CPUMISAEXTCFG   enmAvx;
    CPUMISAEXTCFG   enmAvx2;
    CPUMISAEXTCFG   enmXSave;
    CPUMISAEXTCFG   enmAesNi;
    CPUMISAEXTCFG   enmPClMul;
    CPUMISAEXTCFG   enmPopCnt;
    CPUMISAEXTCFG   enmMovBe;
    CPUMISAEXTCFG   enmRdRand;
    CPUMISAEXTCFG   enmRdSeed;
    CPUMISAEXTCFG   enmSha;
    CPUMISAEXTCFG   enmAdx;
    CPUMISAEXTCFG   enmCLFlushOpt;
    CPUMISAEXTCFG   enmFsGsBase;
    CPUMISAEXTCFG   enmPcid;
    CPUMISAEXTCFG   enmInvpcid;
    CPUMISAEXTCFG   enmFlushCmdMsr;
    CPUMISAEXTCFG   enmMdsClear;
    CPUMISAEXTCFG   enmArchCapMsr;

    CPUMISAEXTCFG   enmAbm;
    CPUMISAEXTCFG   enmSse4A;
    CPUMISAEXTCFG   enmMisAlnSse;
    CPUMISAEXTCFG   enm3dNowPrf;
    CPUMISAEXTCFG   enmAmdExtMmx;

    uint32_t        uMaxStdLeaf;
    uint32_t        uMaxExtLeaf;
    uint32_t        uMaxCentaurLeaf;
    uint32_t        uMaxIntelFamilyModelStep;
    char            szCpuName[128];
} CPUMCPUIDCONFIG;
/** Pointer to CPUID config (from CFGM). */
typedef CPUMCPUIDCONFIG *PCPUMCPUIDCONFIG;


/**
 * Mini CPU selection support for making Mac OS X happy.
 *
 * Executes the /CPUM/MaxIntelFamilyModelStep config.
 *
 * @param pCpum The CPUM instance data.
 * @param pConfig The CPUID configuration we've read from CFGM.
 */
static void cpumR3CpuIdLimitIntelFamModStep(PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
{
    if (pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
        uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(RTX86GetCpuStepping(pStdFeatureLeaf->uEax),
                                                                RTX86GetCpuModelIntel(pStdFeatureLeaf->uEax),
                                                                RTX86GetCpuFamily(pStdFeatureLeaf->uEax),
                                                                0);
        uint32_t uMaxIntelFamilyModelStep = pConfig->uMaxIntelFamilyModelStep;
        if (pConfig->uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep)
        {
            uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000);
            uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf;          /* stepping */
            uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4;   /* 4 low model bits */
            uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) >> 4) << 16;   /* 4 high model bits */
            uNew |= (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf) << 8;   /* 4 low family bits */
            if (RT_BYTE3(uMaxIntelFamilyModelStep) > 0xf)              /* 8 high family bits, using Intel's suggested calculation. */
                uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20;
            LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x)\n",
                    pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
            pStdFeatureLeaf->uEax = uNew;
        }
    }
}
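
/*
 * Worked example of the repacking above (config value assumed): with
 * /CPUM/MaxIntelFamilyModelStep set to 0x000F0602 - i.e. RT_BYTE3 = family
 * 0xF, RT_BYTE2 = model 0x06, RT_BYTE1 = stepping 2 - the new EAX gets
 * stepping 2 in bits [3:0], model 6 in bits [7:4], family 0xF in bits [11:8]
 * and no extended family/model bits, yielding 0xF62 plus whatever reserved
 * bits survive the 0xf0003000 mask.
 */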


/**
 * Limits the number of entries, zapping the remainder.
 *
 * The limits are masking off stuff about power saving and similar; this
 * is perhaps a bit crudely done as there is probably some relatively harmless
 * info too in these leaves (like words about having a constant TSC).
 *
 * @param pCpum The CPUM instance data.
 * @param pConfig The CPUID configuration we've read from CFGM.
 */
static void cpumR3CpuIdLimitLeaves(PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
{
    /*
     * Standard leaves.
     */
    uint32_t       uSubLeaf = 0;
    PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0, uSubLeaf);
    if (pCurLeaf)
    {
        uint32_t uLimit = pCurLeaf->uEax;
        if (uLimit <= UINT32_C(0x000fffff))
        {
            if (uLimit > pConfig->uMaxStdLeaf)
            {
                pCurLeaf->uEax = uLimit = pConfig->uMaxStdLeaf;
                cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
                                       uLimit + 1, UINT32_C(0x000fffff));
            }

            /* NT4 hack, no zapping of extra leaves here. */
            if (pConfig->fNt4LeafLimit && uLimit > 3)
                pCurLeaf->uEax = uLimit = 3;

            while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x00000000), ++uSubLeaf)) != NULL)
                pCurLeaf->uEax = uLimit;
        }
        else
        {
            LogRel(("CPUID: Invalid standard range: %#x\n", uLimit));
            cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
                                   UINT32_C(0x00000000), UINT32_C(0x0fffffff));
        }
    }

    /*
     * Extended leaves.
     */
    uSubLeaf = 0;
    pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000000), uSubLeaf);
    if (pCurLeaf)
    {
        uint32_t uLimit = pCurLeaf->uEax;
        if (   uLimit >= UINT32_C(0x80000000)
            && uLimit <= UINT32_C(0x800fffff))
        {
            if (uLimit > pConfig->uMaxExtLeaf)
            {
                pCurLeaf->uEax = uLimit = pConfig->uMaxExtLeaf;
                cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
                                       uLimit + 1, UINT32_C(0x800fffff));
                while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000000), ++uSubLeaf)) != NULL)
                    pCurLeaf->uEax = uLimit;
            }
        }
        else
        {
            LogRel(("CPUID: Invalid extended range: %#x\n", uLimit));
            cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
                                   UINT32_C(0x80000000), UINT32_C(0x8ffffffd));
        }
    }

    /*
     * Centaur leaves (VIA).
     */
    uSubLeaf = 0;
    pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000000), uSubLeaf);
    if (pCurLeaf)
    {
        uint32_t uLimit = pCurLeaf->uEax;
        if (   uLimit >= UINT32_C(0xc0000000)
            && uLimit <= UINT32_C(0xc00fffff))
        {
            if (uLimit > pConfig->uMaxCentaurLeaf)
            {
                pCurLeaf->uEax = uLimit = pConfig->uMaxCentaurLeaf;
                cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
                                       uLimit + 1, UINT32_C(0xcfffffff));
                while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000000), ++uSubLeaf)) != NULL)
                    pCurLeaf->uEax = uLimit;
            }
        }
        else
        {
            LogRel(("CPUID: Invalid centaur range: %#x\n", uLimit));
            cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
                                   UINT32_C(0xc0000000), UINT32_C(0xcfffffff));
        }
    }
}
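
/*
 * Configuration sketch (key names assumed to mirror the CPUMCPUIDCONFIG
 * members read by cpumR3CpuIdReadConfig; values invented): clamping the
 * ranges handled above.
 *
 *      /CPUM/MaxStdLeaf      0x00000016
 *      /CPUM/MaxExtLeaf      0x80000008
 *      /CPUM/MaxCentaurLeaf  0xc0000004
 *      /CPUM/NT4LeafLimit    false          (the fNt4LeafLimit hack)
 */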


/**
 * Clears a CPUID leaf and all sub-leaves (to zero).
 *
 * @param pCpum The CPUM instance data.
 * @param uLeaf The leaf to clear.
 */
static void cpumR3CpuIdZeroLeaf(PCPUM pCpum, uint32_t uLeaf)
{
    uint32_t       uSubLeaf = 0;
    PCPUMCPUIDLEAF pCurLeaf;
    while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, uLeaf, uSubLeaf)) != NULL)
    {
        pCurLeaf->uEax = 0;
        pCurLeaf->uEbx = 0;
        pCurLeaf->uEcx = 0;
        pCurLeaf->uEdx = 0;
        uSubLeaf++;
    }
}


/**
 * Used by cpumR3CpuIdSanitize to ensure that we don't have any sub-leaves for
 * the given leaf.
 *
 * @returns pLeaf.
 * @param pCpum The CPUM instance data.
 * @param pLeaf The leaf to ensure is alone with its EAX input value.
 */
static PCPUMCPUIDLEAF cpumR3CpuIdMakeSingleLeaf(PCPUM pCpum, PCPUMCPUIDLEAF pLeaf)
{
    Assert((uintptr_t)(pLeaf - pCpum->GuestInfo.paCpuIdLeavesR3) < pCpum->GuestInfo.cCpuIdLeaves);
    if (pLeaf->fSubLeafMask != 0)
    {
        /*
         * Figure out how many sub-leaves in need of removal (we'll keep the first).
         * Log everything while we're at it.
         */
        LogRel(("CPUM:\n"
                "CPUM: Unexpected CPUID sub-leaves for leaf %#x; fSubLeafMask=%#x\n", pLeaf->uLeaf, pLeaf->fSubLeafMask));
        PCPUMCPUIDLEAF pLast    = &pCpum->GuestInfo.paCpuIdLeavesR3[pCpum->GuestInfo.cCpuIdLeaves - 1];
        PCPUMCPUIDLEAF pSubLeaf = pLeaf;
        for (;;)
        {
            LogRel(("CPUM: %08x/%08x: %08x %08x %08x %08x; flags=%#x mask=%#x\n",
                    pSubLeaf->uLeaf, pSubLeaf->uSubLeaf,
                    pSubLeaf->uEax, pSubLeaf->uEbx, pSubLeaf->uEcx, pSubLeaf->uEdx,
                    pSubLeaf->fFlags, pSubLeaf->fSubLeafMask));
            if (pSubLeaf == pLast || pSubLeaf[1].uLeaf != pLeaf->uLeaf)
                break;
            pSubLeaf++;
        }
        LogRel(("CPUM:\n"));

        /*
         * Remove the offending sub-leaves.
         */
        if (pSubLeaf != pLeaf)
        {
            if (pSubLeaf != pLast)
                memmove(pLeaf + 1, pSubLeaf + 1, (uintptr_t)pLast - (uintptr_t)pSubLeaf);
            pCpum->GuestInfo.cCpuIdLeaves -= (uint32_t)(pSubLeaf - pLeaf);
        }

        /*
         * Convert the first sub-leaf into a single leaf.
         */
        pLeaf->uSubLeaf     = 0;
        pLeaf->fSubLeafMask = 0;
    }
    return pLeaf;
}


/**
 * Sanitizes and adjust the CPUID leaves.
 *
 * Drop features that aren't virtualized (or virtualizable). Adjust information
 * and capabilities to fit the virtualized hardware. Remove information the
 * guest shouldn't have (because it's wrong in the virtual world or because it
 * gives away host details) or that we don't have documentation for and no idea
 * what it means.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure (for cCpus).
 * @param pCpum The CPUM instance data.
 * @param pConfig The CPUID configuration we've read from CFGM.
 */
static int cpumR3CpuIdSanitize(PVM pVM, PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
{
#define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \
    if ( pCpum->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \
    { \
        LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \
        (a_pLeafReg) &= ~(uint32_t)(fMask); \
    }
#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \
    if ( pCpum->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \
    { \
        LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
        (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
    }
#define PORTABLE_DISABLE_FEATURE_BIT_CFG(Lvl, a_pLeafReg, FeatNm, fBitMask, enmConfig) \
    if (   pCpum->u8PortableCpuIdLevel >= (Lvl) \
        && ((a_pLeafReg) & (fBitMask)) \
        && (enmConfig) != CPUMISAEXTCFG_ENABLED_PORTABLE ) \
    { \
        LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
        (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
    }
    Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID);

    /* The CPUID entries we start with here aren't necessarily the ones of the host, so we
       must consult HostFeatures when processing CPUMISAEXTCFG variables. */
    PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures;
#define PASSTHRU_FEATURE(enmConfig, fHostFeature, fConst) \
    ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) ? (fConst) : 0)
#define PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, fAndExpr, fConst) \
    ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) && (fAndExpr) ? (fConst) : 0)
#define PASSTHRU_FEATURE_TODO(enmConfig, fConst) ((enmConfig) ? (fConst) : 0)
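/* How PASSTHRU_FEATURE resolves for each CPUMISAEXTCFG value (derived from
 * the macro above):
 *      DISABLED           -> 0 (the first operand is false)
 *      ENABLED_SUPPORTED  -> fConst if the host has the feature, else 0
 *      ENABLED_PORTABLE   -> same as ENABLED_SUPPORTED here; the difference
 *                            only matters to PORTABLE_DISABLE_FEATURE_BIT_CFG
 *      ENABLED_ALWAYS     -> fConst regardless of the host
 */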

    /* Cpuid 1:
     * EAX: CPU model, family and stepping.
     *
     * ECX + EDX: Supported features. Only report features we can support.
     * Note! When enabling new features the Synthetic CPU and Portable CPUID
     * options may require adjusting (i.e. stripping what was enabled).
     *
     * EBX: Branding, CLFLUSH line size, logical processors per package and
     * initial APIC ID.
     */
    PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0); /* Note! Must refetch when used later. */
    AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2);
    pStdFeatureLeaf = cpumR3CpuIdMakeSingleLeaf(pCpum, pStdFeatureLeaf);

    pStdFeatureLeaf->uEdx &= X86_CPUID_FEATURE_EDX_FPU
                           | X86_CPUID_FEATURE_EDX_VME
                           | X86_CPUID_FEATURE_EDX_DE
                           | X86_CPUID_FEATURE_EDX_PSE
                           | X86_CPUID_FEATURE_EDX_TSC
                           | X86_CPUID_FEATURE_EDX_MSR
                           //| X86_CPUID_FEATURE_EDX_PAE - set later if configured.
                           | X86_CPUID_FEATURE_EDX_MCE
                           | X86_CPUID_FEATURE_EDX_CX8
                           //| X86_CPUID_FEATURE_EDX_APIC - set by the APIC device if present.
                           //| RT_BIT_32(10) - not defined
                           | X86_CPUID_FEATURE_EDX_SEP
                           | X86_CPUID_FEATURE_EDX_MTRR
                           | X86_CPUID_FEATURE_EDX_PGE
                           | X86_CPUID_FEATURE_EDX_MCA
                           | X86_CPUID_FEATURE_EDX_CMOV
                           | X86_CPUID_FEATURE_EDX_PAT /* 16 */
                           | X86_CPUID_FEATURE_EDX_PSE36
                           //| X86_CPUID_FEATURE_EDX_PSN - no serial number.
                           | X86_CPUID_FEATURE_EDX_CLFSH
                           //| RT_BIT_32(20) - not defined
                           //| X86_CPUID_FEATURE_EDX_DS - no debug store.
                           //| X86_CPUID_FEATURE_EDX_ACPI - not supported (not DevAcpi, right?).
                           | X86_CPUID_FEATURE_EDX_MMX
                           | X86_CPUID_FEATURE_EDX_FXSR
                           | X86_CPUID_FEATURE_EDX_SSE
                           | X86_CPUID_FEATURE_EDX_SSE2
                           //| X86_CPUID_FEATURE_EDX_SS - no self snoop.
                           | X86_CPUID_FEATURE_EDX_HTT
                           //| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
                           //| RT_BIT_32(30) - not defined
                           //| X86_CPUID_FEATURE_EDX_PBE - no pending break enabled.
                           ;
    pStdFeatureLeaf->uEcx &= X86_CPUID_FEATURE_ECX_SSE3
                           | PASSTHRU_FEATURE_TODO(pConfig->enmPClMul, X86_CPUID_FEATURE_ECX_PCLMUL)
                           //| X86_CPUID_FEATURE_ECX_DTES64 - not implemented yet.
                           /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
                           | PASSTHRU_FEATURE_EX(pConfig->enmMonitor, pHstFeat->fMonitorMWait, pVM->cCpus == 1, X86_CPUID_FEATURE_ECX_MONITOR)
                           //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
                           | (pConfig->fNestedHWVirt ? X86_CPUID_FEATURE_ECX_VMX : 0)
                           //| X86_CPUID_FEATURE_ECX_SMX - not virtualized yet.
                           //| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
                           //| X86_CPUID_FEATURE_ECX_TM2 - no thermal monitor 2.
                           | X86_CPUID_FEATURE_ECX_SSSE3
                           //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
                           //| X86_CPUID_FEATURE_ECX_FMA - not implemented yet.
                           | PASSTHRU_FEATURE(pConfig->enmCmpXchg16b, pHstFeat->fMovCmpXchg16b, X86_CPUID_FEATURE_ECX_CX16)
                           /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
                           //| X86_CPUID_FEATURE_ECX_TPRUPDATE
                           //| X86_CPUID_FEATURE_ECX_PDCM - not implemented yet.
                           | PASSTHRU_FEATURE(pConfig->enmPcid, pHstFeat->fPcid, X86_CPUID_FEATURE_ECX_PCID)
                           //| X86_CPUID_FEATURE_ECX_DCA - not implemented yet.
                           | PASSTHRU_FEATURE(pConfig->enmSse41, pHstFeat->fSse41, X86_CPUID_FEATURE_ECX_SSE4_1)
                           | PASSTHRU_FEATURE(pConfig->enmSse42, pHstFeat->fSse42, X86_CPUID_FEATURE_ECX_SSE4_2)
                           //| X86_CPUID_FEATURE_ECX_X2APIC - turned on later by the device if enabled.
                           | PASSTHRU_FEATURE_TODO(pConfig->enmMovBe, X86_CPUID_FEATURE_ECX_MOVBE)
                           | PASSTHRU_FEATURE(pConfig->enmPopCnt, pHstFeat->fPopCnt, X86_CPUID_FEATURE_ECX_POPCNT)
                           //| X86_CPUID_FEATURE_ECX_TSCDEADL - not implemented yet.
                           | PASSTHRU_FEATURE_TODO(pConfig->enmAesNi, X86_CPUID_FEATURE_ECX_AES)
                           | PASSTHRU_FEATURE(pConfig->enmXSave, pHstFeat->fXSaveRstor, X86_CPUID_FEATURE_ECX_XSAVE)
                           //| X86_CPUID_FEATURE_ECX_OSXSAVE - mirrors CR4.OSXSAVE state, set dynamically.
                           | PASSTHRU_FEATURE(pConfig->enmAvx, pHstFeat->fAvx, X86_CPUID_FEATURE_ECX_AVX)
                           //| X86_CPUID_FEATURE_ECX_F16C - not implemented yet.
                           | PASSTHRU_FEATURE_TODO(pConfig->enmRdRand, X86_CPUID_FEATURE_ECX_RDRAND)
                           //| X86_CPUID_FEATURE_ECX_HVP - Set explicitly later.
                           ;

    /* Mask out PCID unless FSGSBASE is exposed due to a bug in Windows 10 SMP guests, see @bugref{9089#c15}. */
    if (   !pVM->cpum.s.GuestFeatures.fFsGsBase
        && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_PCID))
    {
        pStdFeatureLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_PCID;
        LogRel(("CPUM: Disabled PCID without FSGSBASE to workaround buggy guests\n"));
    }

    if (pCpum->u8PortableCpuIdLevel > 0)
    {
        PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
        PORTABLE_DISABLE_FEATURE_BIT(    1, pStdFeatureLeaf->uEcx, SSSE3,  X86_CPUID_FEATURE_ECX_SSSE3);
        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, PCID,   X86_CPUID_FEATURE_ECX_PCID,   pConfig->enmPcid);
        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, SSE4_1, X86_CPUID_FEATURE_ECX_SSE4_1, pConfig->enmSse41);
        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, SSE4_2, X86_CPUID_FEATURE_ECX_SSE4_2, pConfig->enmSse42);
        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, MOVBE,  X86_CPUID_FEATURE_ECX_MOVBE,  pConfig->enmMovBe);
        PORTABLE_DISABLE_FEATURE_BIT(    1, pStdFeatureLeaf->uEcx, AES,    X86_CPUID_FEATURE_ECX_AES);
        PORTABLE_DISABLE_FEATURE_BIT(    1, pStdFeatureLeaf->uEcx, VMX,    X86_CPUID_FEATURE_ECX_VMX);
        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, PCLMUL, X86_CPUID_FEATURE_ECX_PCLMUL, pConfig->enmPClMul);
        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, POPCNT, X86_CPUID_FEATURE_ECX_POPCNT, pConfig->enmPopCnt);
        PORTABLE_DISABLE_FEATURE_BIT(    1, pStdFeatureLeaf->uEcx, F16C,   X86_CPUID_FEATURE_ECX_F16C);
        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, XSAVE,  X86_CPUID_FEATURE_ECX_XSAVE,  pConfig->enmXSave);
        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, AVX,    X86_CPUID_FEATURE_ECX_AVX,    pConfig->enmAvx);
        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, RDRAND, X86_CPUID_FEATURE_ECX_RDRAND, pConfig->enmRdRand);
        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, CX16,   X86_CPUID_FEATURE_ECX_CX16,   pConfig->enmCmpXchg16b);
        PORTABLE_DISABLE_FEATURE_BIT(    2, pStdFeatureLeaf->uEcx, SSE3,   X86_CPUID_FEATURE_ECX_SSE3);
        PORTABLE_DISABLE_FEATURE_BIT(    3, pStdFeatureLeaf->uEdx, SSE2,   X86_CPUID_FEATURE_EDX_SSE2);
        PORTABLE_DISABLE_FEATURE_BIT(    3, pStdFeatureLeaf->uEdx, SSE,    X86_CPUID_FEATURE_EDX_SSE);
        PORTABLE_DISABLE_FEATURE_BIT(    3, pStdFeatureLeaf->uEdx, CLFSH,  X86_CPUID_FEATURE_EDX_CLFSH);
        PORTABLE_DISABLE_FEATURE_BIT(    3, pStdFeatureLeaf->uEdx, CMOV,   X86_CPUID_FEATURE_EDX_CMOV);

        Assert(!(pStdFeatureLeaf->uEdx & ( X86_CPUID_FEATURE_EDX_SEP ///??
                                         | X86_CPUID_FEATURE_EDX_PSN
                                         | X86_CPUID_FEATURE_EDX_DS
                                         | X86_CPUID_FEATURE_EDX_ACPI
                                         | X86_CPUID_FEATURE_EDX_SS
                                         | X86_CPUID_FEATURE_EDX_TM
                                         | X86_CPUID_FEATURE_EDX_PBE
                                         )));
        Assert(!(pStdFeatureLeaf->uEcx & ( X86_CPUID_FEATURE_ECX_DTES64
                                         | X86_CPUID_FEATURE_ECX_CPLDS
                                         | X86_CPUID_FEATURE_ECX_AES
                                         | X86_CPUID_FEATURE_ECX_VMX
                                         | X86_CPUID_FEATURE_ECX_SMX
                                         | X86_CPUID_FEATURE_ECX_EST
                                         | X86_CPUID_FEATURE_ECX_TM2
                                         | X86_CPUID_FEATURE_ECX_CNTXID
                                         | X86_CPUID_FEATURE_ECX_FMA
                                         | X86_CPUID_FEATURE_ECX_TPRUPDATE
                                         | X86_CPUID_FEATURE_ECX_PDCM
                                         | X86_CPUID_FEATURE_ECX_DCA
                                         | X86_CPUID_FEATURE_ECX_OSXSAVE
                                         )));
    }

    /* Set up APIC ID for CPU 0, configure multi core/threaded smp. */
    pStdFeatureLeaf->uEbx &= UINT32_C(0x0000ffff); /* (APIC-ID := 0 and #LogCpus := 0) */

    /* The HTT bit is architectural and does not directly indicate hyper-threading or multiple cores;
     * it was set even on single-core/non-HT Northwood P4s for example. The HTT bit only means that the
     * information in EBX[23:16] (max number of addressable logical processor IDs) is valid.
     */
#ifdef VBOX_WITH_MULTI_CORE
    if (pVM->cCpus > 1)
        pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT;  /* Force if emulating a multi-core CPU. */
#endif
    if (pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_HTT)
    {
        /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU
           core times the number of CPU cores per processor */
#ifdef VBOX_WITH_MULTI_CORE
        pStdFeatureLeaf->uEbx |= pVM->cCpus <= 0xff ? (pVM->cCpus << 16) : UINT32_C(0x00ff0000);
#else
        /* Single logical processor in a package. */
        pStdFeatureLeaf->uEbx |= (1 << 16);
#endif
    }

    uint32_t uMicrocodeRev;
    int rc = SUPR3QueryMicrocodeRev(&uMicrocodeRev);
    if (RT_SUCCESS(rc))
    {
        LogRel(("CPUM: Microcode revision 0x%08X\n", uMicrocodeRev));
    }
    else
    {
        uMicrocodeRev = 0;
        LogRel(("CPUM: Failed to query microcode revision. rc=%Rrc\n", rc));
    }

    /* Mask out the VME capability on certain CPUs, unless overridden by fForceVme.
     * VME bug was fixed in AGESA 1.0.0.6, microcode patch level 8001126.
     */
    if (   (   pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_AMD_Zen_Ryzen
            /** @todo The following ASSUMES that Hygon uses the same version numbering
             * as AMD and that they shipped buggy firmware. */
            || pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_Hygon_Dhyana)
        && uMicrocodeRev < 0x8001126
        && !pConfig->fForceVme)
    {
        /** @todo The above is a very coarse test but at the moment we don't know any better (see @bugref{8852}). */
        LogRel(("CPUM: Zen VME workaround engaged\n"));
        pStdFeatureLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_VME;
    }

    /* Force standard feature bits. */
    if (pConfig->enmPClMul == CPUMISAEXTCFG_ENABLED_ALWAYS)
        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_PCLMUL;
    if (pConfig->enmMonitor == CPUMISAEXTCFG_ENABLED_ALWAYS)
        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_MONITOR;
    if (pConfig->enmCmpXchg16b == CPUMISAEXTCFG_ENABLED_ALWAYS)
        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_CX16;
    if (pConfig->enmSse41 == CPUMISAEXTCFG_ENABLED_ALWAYS)
        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_SSE4_1;
    if (pConfig->enmSse42 == CPUMISAEXTCFG_ENABLED_ALWAYS)
        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_SSE4_2;
    if (pConfig->enmMovBe == CPUMISAEXTCFG_ENABLED_ALWAYS)
        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_MOVBE;
    if (pConfig->enmPopCnt == CPUMISAEXTCFG_ENABLED_ALWAYS)
        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_POPCNT;
    if (pConfig->enmAesNi == CPUMISAEXTCFG_ENABLED_ALWAYS)
        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AES;
    if (pConfig->enmXSave == CPUMISAEXTCFG_ENABLED_ALWAYS)
        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_XSAVE;
    if (pConfig->enmAvx == CPUMISAEXTCFG_ENABLED_ALWAYS)
        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AVX;
    if (pConfig->enmRdRand == CPUMISAEXTCFG_ENABLED_ALWAYS)
        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_RDRAND;

    pStdFeatureLeaf = NULL; /* Must refetch! */

    /* Cpuid 0x80000001: (Similar, but in no way identical to 0x00000001.)
     * AMD:
     * EAX: CPU model, family and stepping.
     *
     * ECX + EDX: Supported features. Only report features we can support.
     * Note! When enabling new features the Synthetic CPU and Portable CPUID
     * options may require adjusting (i.e. stripping what was enabled).
     * ASSUMES that this is ALWAYS the AMD defined feature set if present.
     *
     * EBX: Branding ID and package type (or reserved).
     *
     * Intel and probably most others:
     * EAX: 0
     * EBX: 0
     * ECX + EDX: Subset of AMD features, mainly for AMD64 support.
     */
    PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0);
    if (pExtFeatureLeaf)
    {
        pExtFeatureLeaf = cpumR3CpuIdMakeSingleLeaf(pCpum, pExtFeatureLeaf);

        pExtFeatureLeaf->uEdx &= X86_CPUID_AMD_FEATURE_EDX_FPU
                               | X86_CPUID_AMD_FEATURE_EDX_VME
                               | X86_CPUID_AMD_FEATURE_EDX_DE
                               | X86_CPUID_AMD_FEATURE_EDX_PSE
                               | X86_CPUID_AMD_FEATURE_EDX_TSC
                               | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
                               //| X86_CPUID_AMD_FEATURE_EDX_PAE - turned on when necessary
                               //| X86_CPUID_AMD_FEATURE_EDX_MCE - not virtualized yet.
                               | X86_CPUID_AMD_FEATURE_EDX_CX8
                               //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
                               //| RT_BIT_32(10) - reserved
                               | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
                               | X86_CPUID_AMD_FEATURE_EDX_MTRR
                               | X86_CPUID_AMD_FEATURE_EDX_PGE
                               | X86_CPUID_AMD_FEATURE_EDX_MCA
                               | X86_CPUID_AMD_FEATURE_EDX_CMOV
                               | X86_CPUID_AMD_FEATURE_EDX_PAT
                               | X86_CPUID_AMD_FEATURE_EDX_PSE36
                               //| RT_BIT_32(18) - reserved
                               //| RT_BIT_32(19) - reserved
                               | X86_CPUID_EXT_FEATURE_EDX_NX
                               //| RT_BIT_32(21) - reserved
                               | PASSTHRU_FEATURE(pConfig->enmAmdExtMmx, pHstFeat->fAmdMmxExts, X86_CPUID_AMD_FEATURE_EDX_AXMMX)
                               | X86_CPUID_AMD_FEATURE_EDX_MMX
                               | X86_CPUID_AMD_FEATURE_EDX_FXSR
                               | X86_CPUID_AMD_FEATURE_EDX_FFXSR
                               //| X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
                               | X86_CPUID_EXT_FEATURE_EDX_RDTSCP
                               //| RT_BIT_32(28) - reserved
                               //| X86_CPUID_EXT_FEATURE_EDX_LONG_MODE - turned on when necessary
                               | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
                               | X86_CPUID_AMD_FEATURE_EDX_3DNOW
                               ;
        pExtFeatureLeaf->uEcx &= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
                               //| X86_CPUID_AMD_FEATURE_ECX_CMPL - set below if applicable.
                               | (pConfig->fNestedHWVirt ? X86_CPUID_AMD_FEATURE_ECX_SVM : 0)
                               //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                               /* Note: This could prevent teleporting from AMD to Intel CPUs! */
                               | X86_CPUID_AMD_FEATURE_ECX_CR8L /* expose lock mov cr0 = mov cr8 hack for guests that can use this feature to access the TPR. */
                               | PASSTHRU_FEATURE(pConfig->enmAbm, pHstFeat->fAbm, X86_CPUID_AMD_FEATURE_ECX_ABM)
                               | PASSTHRU_FEATURE_TODO(pConfig->enmSse4A, X86_CPUID_AMD_FEATURE_ECX_SSE4A)
                               | PASSTHRU_FEATURE_TODO(pConfig->enmMisAlnSse, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE)
                               | PASSTHRU_FEATURE(pConfig->enm3dNowPrf, pHstFeat->f3DNowPrefetch, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF)
                               //| X86_CPUID_AMD_FEATURE_ECX_OSVW
                               //| X86_CPUID_AMD_FEATURE_ECX_IBS
                               //| X86_CPUID_AMD_FEATURE_ECX_XOP
                               //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
                               //| X86_CPUID_AMD_FEATURE_ECX_WDT
                               //| RT_BIT_32(14) - reserved
                               //| X86_CPUID_AMD_FEATURE_ECX_LWP - not supported
                               //| X86_CPUID_AMD_FEATURE_ECX_FMA4 - not yet virtualized.
                               //| RT_BIT_32(17) - reserved
                               //| RT_BIT_32(18) - reserved
                               //| X86_CPUID_AMD_FEATURE_ECX_NODEID - not yet virtualized.
                               //| RT_BIT_32(20) - reserved
1623 //| X86_CPUID_AMD_FEATURE_ECX_TBM - not yet virtualized.
1624 //| X86_CPUID_AMD_FEATURE_ECX_TOPOEXT - not yet virtualized.
1625 //| RT_BIT_32(23) - reserved
1626 //| RT_BIT_32(24) - reserved
1627 //| RT_BIT_32(25) - reserved
1628 //| RT_BIT_32(26) - reserved
1629 //| RT_BIT_32(27) - reserved
1630 //| RT_BIT_32(28) - reserved
1631 //| RT_BIT_32(29) - reserved
1632 //| RT_BIT_32(30) - reserved
1633 //| RT_BIT_32(31) - reserved
1634 ;
1635#ifdef VBOX_WITH_MULTI_CORE
1636 if ( pVM->cCpus > 1
1637 && ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
1638 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
1639 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL; /* CmpLegacy */
1640#endif
1641
1642 if (pCpum->u8PortableCpuIdLevel > 0)
1643 {
1644 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L);
1645 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, SVM, X86_CPUID_AMD_FEATURE_ECX_SVM);
1646 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, ABM, X86_CPUID_AMD_FEATURE_ECX_ABM, pConfig->enmAbm);
1647 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, SSE4A, X86_CPUID_AMD_FEATURE_ECX_SSE4A, pConfig->enmSse4A);
1648 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, MISALNSSE, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE, pConfig->enmMisAlnSse);
1649 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, 3DNOWPRF, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF, pConfig->enm3dNowPrf);
1650 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, XOP, X86_CPUID_AMD_FEATURE_ECX_XOP);
1651 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, TBM, X86_CPUID_AMD_FEATURE_ECX_TBM);
1652 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, FMA4, X86_CPUID_AMD_FEATURE_ECX_FMA4);
1653 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEdx, AXMMX, X86_CPUID_AMD_FEATURE_EDX_AXMMX, pConfig->enmAmdExtMmx);
1654 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
1655 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
1656 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
1657 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
1658 PORTABLE_DISABLE_FEATURE_BIT( 2, pExtFeatureLeaf->uEcx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
1659 PORTABLE_DISABLE_FEATURE_BIT( 3, pExtFeatureLeaf->uEcx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV);
1660
1661 Assert(!(pExtFeatureLeaf->uEcx & ( X86_CPUID_AMD_FEATURE_ECX_SVM
1662 | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
1663 | X86_CPUID_AMD_FEATURE_ECX_OSVW
1664 | X86_CPUID_AMD_FEATURE_ECX_IBS
1665 | X86_CPUID_AMD_FEATURE_ECX_SKINIT
1666 | X86_CPUID_AMD_FEATURE_ECX_WDT
1667 | X86_CPUID_AMD_FEATURE_ECX_LWP
1668 | X86_CPUID_AMD_FEATURE_ECX_NODEID
1669 | X86_CPUID_AMD_FEATURE_ECX_TOPOEXT
1670 | UINT32_C(0xff964000)
1671 )));
1672 Assert(!(pExtFeatureLeaf->uEdx & ( RT_BIT(10)
1673 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
1674 | RT_BIT(18)
1675 | RT_BIT(19)
1676 | RT_BIT(21)
1677 | X86_CPUID_AMD_FEATURE_EDX_AXMMX
1678 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
1679 | RT_BIT(28)
1680 )));
1681 }
1682
1683 /* Force extended feature bits. */
1684 if (pConfig->enmAbm == CPUMISAEXTCFG_ENABLED_ALWAYS)
1685 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_ABM;
1686 if (pConfig->enmSse4A == CPUMISAEXTCFG_ENABLED_ALWAYS)
1687 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SSE4A;
1688 if (pConfig->enmMisAlnSse == CPUMISAEXTCFG_ENABLED_ALWAYS)
1689 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_MISALNSSE;
1690 if (pConfig->enm3dNowPrf == CPUMISAEXTCFG_ENABLED_ALWAYS)
1691 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF;
1692 if (pConfig->enmAmdExtMmx == CPUMISAEXTCFG_ENABLED_ALWAYS)
1693 pExtFeatureLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_AXMMX;
1694 }
1695 pExtFeatureLeaf = NULL; /* Must refetch! */
1696
1697
1698 /* Cpuid 2:
1699 * Intel: (Nondeterministic) Cache and TLB information
1700 * AMD: Reserved
1701 * VIA: Reserved
1702 * Safe to expose.
1703 */
1704 uint32_t uSubLeaf = 0;
1705 PCPUMCPUIDLEAF pCurLeaf;
1706 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 2, uSubLeaf)) != NULL)
1707 {
1708 if ((pCurLeaf->uEax & 0xff) > 1)
1709 {
1710 LogRel(("CpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff));
1711 pCurLeaf->uEax &= UINT32_C(0xffffff01);
1712 }
1713 uSubLeaf++;
1714 }
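 /* Editor's sketch (not part of the original source): leaf 2 is defined so that
  * AL tells the guest how many times to execute CPUID(2); the sanitizing above
  * pins that count to 1. Illustration only: */
#if 0
 uint32_t uEax, uEbx, uEcx, uEdx;
 ASMCpuId(2, &uEax, &uEbx, &uEcx, &uEdx);
 for (uint32_t i = 1; i < (uEax & 0xff); i++) /* AL = required CPUID(2) invocation count. */
     ASMCpuId(2, &uEax, &uEbx, &uEcx, &uEdx);
#endif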
1715
1716 /* Cpuid 3:
1717 * Intel: EAX, EBX - reserved (transmeta uses these)
1718 * ECX, EDX - Processor Serial Number if available, otherwise reserved
1719 * AMD: Reserved
1720 * VIA: Reserved
1721 * Safe to expose
1722 */
1723 pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
1724 if (!(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN))
1725 {
1726 uSubLeaf = 0;
1727 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 3, uSubLeaf)) != NULL)
1728 {
1729 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
1730 if (pCpum->u8PortableCpuIdLevel > 0)
1731 pCurLeaf->uEax = pCurLeaf->uEbx = 0;
1732 uSubLeaf++;
1733 }
1734 }
1735
1736 /* Cpuid 4 + ECX:
1737 * Intel: Deterministic Cache Parameters Leaf.
1738 * AMD: Reserved
1739 * VIA: Reserved
1740 * Safe to expose, except for EAX:
1741 * Bits 25-14: Maximum number of addressable IDs for logical processors sharing this cache (see note)**
1742 * Bits 31-26: Maximum number of processor cores in this physical package**
1743 * Note: These SMP values are constant regardless of ECX
1744 */
1745 uSubLeaf = 0;
1746 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 4, uSubLeaf)) != NULL)
1747 {
1748 pCurLeaf->uEax &= UINT32_C(0x00003fff); /* Clear the #maxcores, #threads-sharing-cache (both are #-1).*/
1749#ifdef VBOX_WITH_MULTI_CORE
1750 if ( pVM->cCpus > 1
1751 && pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1752 {
1753 AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
1754 /* One logical processor with possibly multiple cores. */
1755 /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
1756 pCurLeaf->uEax |= pVM->cCpus <= 0x40 ? ((pVM->cCpus - 1) << 26) : UINT32_C(0xfc000000); /* 6 bits only -> 64 cores! */
1757 }
1758#endif
1759 uSubLeaf++;
1760 }
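 /* Editor's sketch (not part of the original source): decoding the leaf 4 EAX
  * topology fields adjusted above (Intel convention, both fields stored as N-1). */
#if 0
 uint32_t uEax, uEbx, uEcx, uEdx;
 ASMCpuIdExSlow(4, 0, 0 /*uSubLeaf*/, 0, &uEax, &uEbx, &uEcx, &uEdx);
 uint32_t const cMaxCores        = ((uEax >> 26) & 0x3f) + 1;  /* EAX[31:26] + 1: cores per package. */
 uint32_t const cMaxSharingCache = ((uEax >> 14) & 0xfff) + 1; /* EAX[25:14] + 1: logical CPUs sharing this cache. */
#endif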
1761
1762 /* Cpuid 5: Monitor/mwait Leaf
1763 * Intel: ECX, EDX - reserved
1764 * EAX, EBX - Smallest and largest monitor line size
1765 * AMD: EDX - reserved
1766 * EAX, EBX - Smallest and largest monitor line size
1767 * ECX - extensions (ignored for now)
1768 * VIA: Reserved
1769 * Safe to expose
1770 */
1771 uSubLeaf = 0;
1772 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 5, uSubLeaf)) != NULL)
1773 {
1774 pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
1775 if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR))
1776 pCurLeaf->uEax = pCurLeaf->uEbx = 0;
1777
1778 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
1779 if (pConfig->enmMWaitExtensions)
1780 {
1781 pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
1782 /** @todo for now we just expose host's MWAIT C-states, although conceptually
1783 it shall be part of our power management virtualization model */
1784#if 0
1785 /* MWAIT sub C-states */
1786 pCurLeaf->uEdx =
1787 (0 << 0) /* 0 in C0 */ |
1788 (2 << 4) /* 2 in C1 */ |
1789 (2 << 8) /* 2 in C2 */ |
1790 (2 << 12) /* 2 in C3 */ |
1791 (0 << 16) /* 0 in C4 */
1792 ;
1793#endif
1794 }
1795 else
1796 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
1797 uSubLeaf++;
1798 }
1799
1800 /* Cpuid 6: Digital Thermal Sensor and Power Management Parameters.
1801 * Intel: Various stuff.
1802 * AMD: EAX, EBX, EDX - reserved.
1803 * ECX - Bit zero is EffFreq, indicating MSR_0000_00e7 and MSR_0000_00e8
1804 * present. Same as intel.
1805 * VIA: ??
1806 *
1807 * We clear everything here for now.
1808 */
1809 cpumR3CpuIdZeroLeaf(pCpum, 6);
1810
1811 /* Cpuid 7 + ECX: Structured Extended Feature Flags Enumeration
1812 * EAX: Number of sub leaves.
1813 * EBX+ECX+EDX: Feature flags
1814 *
1815 * We only have documentation for one sub-leaf, so clear all others (no need
1816 * to remove them as such, just set them to zero).
1817 *
1818 * Note! When enabling new features the Synthetic CPU and Portable CPUID
1819 * options may require adjusting (i.e. stripping what was enabled).
1820 */
1821 uSubLeaf = 0;
1822 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 7, uSubLeaf)) != NULL)
1823 {
1824 switch (uSubLeaf)
1825 {
1826 case 0:
1827 {
1828 pCurLeaf->uEax = 0; /* Max ECX input is 0. */
1829 pCurLeaf->uEbx &= 0
1830 | PASSTHRU_FEATURE(pConfig->enmFsGsBase, pHstFeat->fFsGsBase, X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE)
1831 //| X86_CPUID_STEXT_FEATURE_EBX_TSC_ADJUST RT_BIT(1)
1832 //| X86_CPUID_STEXT_FEATURE_EBX_SGX RT_BIT(2)
1833 | X86_CPUID_STEXT_FEATURE_EBX_BMI1
1834 //| X86_CPUID_STEXT_FEATURE_EBX_HLE RT_BIT(4)
1835 | PASSTHRU_FEATURE(pConfig->enmAvx2, pHstFeat->fAvx2, X86_CPUID_STEXT_FEATURE_EBX_AVX2)
1836 | X86_CPUID_STEXT_FEATURE_EBX_FDP_EXCPTN_ONLY
1837 //| X86_CPUID_STEXT_FEATURE_EBX_SMEP RT_BIT(7)
1838 | X86_CPUID_STEXT_FEATURE_EBX_BMI2
1839 //| X86_CPUID_STEXT_FEATURE_EBX_ERMS RT_BIT(9)
1840 | PASSTHRU_FEATURE(pConfig->enmInvpcid, pHstFeat->fInvpcid, X86_CPUID_STEXT_FEATURE_EBX_INVPCID)
1841 //| X86_CPUID_STEXT_FEATURE_EBX_RTM RT_BIT(11)
1842 //| X86_CPUID_STEXT_FEATURE_EBX_PQM RT_BIT(12)
1843 | X86_CPUID_STEXT_FEATURE_EBX_DEPR_FPU_CS_DS
1844 //| X86_CPUID_STEXT_FEATURE_EBX_MPE RT_BIT(14)
1845 //| X86_CPUID_STEXT_FEATURE_EBX_PQE RT_BIT(15)
1846 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512F RT_BIT(16)
1847 //| RT_BIT(17) - reserved
1848 | PASSTHRU_FEATURE_TODO(pConfig->enmRdSeed, X86_CPUID_STEXT_FEATURE_EBX_RDSEED)
1849 | PASSTHRU_FEATURE(pConfig->enmAdx, pHstFeat->fAdx, X86_CPUID_STEXT_FEATURE_EBX_ADX)
1850 //| X86_CPUID_STEXT_FEATURE_EBX_SMAP RT_BIT(20)
1851 //| RT_BIT(21) - reserved
1852 //| RT_BIT(22) - reserved
1853 | PASSTHRU_FEATURE(pConfig->enmCLFlushOpt, pHstFeat->fClFlushOpt, X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT)
1854 //| RT_BIT(24) - reserved
1855 //| X86_CPUID_STEXT_FEATURE_EBX_INTEL_PT RT_BIT(25)
1856 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512PF RT_BIT(26)
1857 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512ER RT_BIT(27)
1858 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512CD RT_BIT(28)
1859 | PASSTHRU_FEATURE(pConfig->enmSha, pHstFeat->fSha, X86_CPUID_STEXT_FEATURE_EBX_SHA)
1860 //| RT_BIT(30) - reserved
1861 //| RT_BIT(31) - reserved
1862 ;
1863 pCurLeaf->uEcx &= 0
1864 //| X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1 - we do not do vector functions yet.
1865 ;
1866 pCurLeaf->uEdx &= 0
1867 | PASSTHRU_FEATURE(pConfig->enmMdsClear, pHstFeat->fMdsClear, X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR)
1868 //| X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB RT_BIT(26)
1869 //| X86_CPUID_STEXT_FEATURE_EDX_STIBP RT_BIT(27)
1870 | PASSTHRU_FEATURE(pConfig->enmFlushCmdMsr, pHstFeat->fFlushCmd, X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD)
1871 | PASSTHRU_FEATURE(pConfig->enmArchCapMsr, pHstFeat->fArchCap, X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP)
1872 ;
1873
1874 /* Mask out INVPCID unless FSGSBASE is exposed due to a bug in Windows 10 SMP guests, see @bugref{9089#c15}. */
1875 if ( !pVM->cpum.s.GuestFeatures.fFsGsBase
1876 && (pCurLeaf->uEbx & X86_CPUID_STEXT_FEATURE_EBX_INVPCID))
1877 {
1878 pCurLeaf->uEbx &= ~X86_CPUID_STEXT_FEATURE_EBX_INVPCID;
1879 LogRel(("CPUM: Disabled INVPCID without FSGSBASE to work around buggy guests\n"));
1880 }
1881
1882 if (pCpum->u8PortableCpuIdLevel > 0)
1883 {
1884 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, FSGSBASE, X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE, pConfig->enmFsGsBase);
1885 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SGX, X86_CPUID_STEXT_FEATURE_EBX_SGX);
1886 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, AVX2, X86_CPUID_STEXT_FEATURE_EBX_AVX2, pConfig->enmAvx2);
1887 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SMEP, X86_CPUID_STEXT_FEATURE_EBX_SMEP);
1888 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, BMI2, X86_CPUID_STEXT_FEATURE_EBX_BMI2);
1889 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, INVPCID, X86_CPUID_STEXT_FEATURE_EBX_INVPCID, pConfig->enmInvpcid);
1890 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512F, X86_CPUID_STEXT_FEATURE_EBX_AVX512F);
1891 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, RDSEED, X86_CPUID_STEXT_FEATURE_EBX_RDSEED, pConfig->enmRdSeed);
1892 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, ADX, X86_CPUID_STEXT_FEATURE_EBX_ADX, pConfig->enmAdx);
1893 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, CLFLUSHOPT, X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT, pConfig->enmCLFlushOpt);
1894 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512PF, X86_CPUID_STEXT_FEATURE_EBX_AVX512PF);
1895 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512ER, X86_CPUID_STEXT_FEATURE_EBX_AVX512ER);
1896 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512CD, X86_CPUID_STEXT_FEATURE_EBX_AVX512CD);
1897 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SMAP, X86_CPUID_STEXT_FEATURE_EBX_SMAP);
1898 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, SHA, X86_CPUID_STEXT_FEATURE_EBX_SHA, pConfig->enmSha);
1899 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEcx, PREFETCHWT1, X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1);
1900 PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, FLUSH_CMD, X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD, pConfig->enmFlushCmdMsr);
1901 PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, MD_CLEAR, X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR, pConfig->enmMdsClear);
1902 PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, ARCHCAP, X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP, pConfig->enmArchCapMsr);
1903 }
1904
1905 /* Dependencies. */
1906 if (!(pCurLeaf->uEdx & X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD))
1907 pCurLeaf->uEdx &= ~X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR;
1908
1909 /* Force standard feature bits. */
1910 if (pConfig->enmFsGsBase == CPUMISAEXTCFG_ENABLED_ALWAYS)
1911 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE;
1912 if (pConfig->enmAvx2 == CPUMISAEXTCFG_ENABLED_ALWAYS)
1913 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_AVX2;
1914 if (pConfig->enmRdSeed == CPUMISAEXTCFG_ENABLED_ALWAYS)
1915 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_RDSEED;
1916 if (pConfig->enmAdx == CPUMISAEXTCFG_ENABLED_ALWAYS)
1917 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_ADX;
1918 if (pConfig->enmCLFlushOpt == CPUMISAEXTCFG_ENABLED_ALWAYS)
1919 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT;
1920 if (pConfig->enmSha == CPUMISAEXTCFG_ENABLED_ALWAYS)
1921 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_SHA;
1922 if (pConfig->enmInvpcid == CPUMISAEXTCFG_ENABLED_ALWAYS)
1923 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_INVPCID;
1924 if (pConfig->enmFlushCmdMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
1925 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD;
1926 if (pConfig->enmMdsClear == CPUMISAEXTCFG_ENABLED_ALWAYS)
1927 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR;
1928 if (pConfig->enmArchCapMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
1929 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP;
1930 break;
1931 }
1932
1933 default:
1934 /* Invalid index, all values are zero. */
1935 pCurLeaf->uEax = 0;
1936 pCurLeaf->uEbx = 0;
1937 pCurLeaf->uEcx = 0;
1938 pCurLeaf->uEdx = 0;
1939 break;
1940 }
1941 uSubLeaf++;
1942 }
1943
1944 /* Cpuid 8: Marked as reserved by Intel and AMD.
1945 * We zero this since we don't know what it may have been used for.
1946 */
1947 cpumR3CpuIdZeroLeaf(pCpum, 8);
1948
1949 /* Cpuid 9: Direct Cache Access (DCA) Parameters
1950 * Intel: EAX - Value of PLATFORM_DCA_CAP bits.
1951 * EBX, ECX, EDX - reserved.
1952 * AMD: Reserved
1953 * VIA: ??
1954 *
1955 * We zero this.
1956 */
1957 cpumR3CpuIdZeroLeaf(pCpum, 9);
1958
1959 /* Cpuid 0xa: Architectural Performance Monitor Features
1960 * Intel: EAX - Perfmon version, counter count and width.
1961 * EBX - Perfmon event availability; ECX, EDX - fixed counter info.
1962 * AMD: Reserved
1963 * VIA: ??
1964 *
1965 * We zero this, for now at least.
1966 */
1967 cpumR3CpuIdZeroLeaf(pCpum, 10);
1968
1969 /* Cpuid 0xb+ECX: x2APIC Features / Processor Topology.
1970 * Intel: EAX - APIC ID shift right for next level.
1971 * EBX - Factory configured cores/threads at this level.
1972 * ECX - Level number (same as input) and level type (1,2,0).
1973 * EDX - Extended initial APIC ID.
1974 * AMD: Reserved
1975 * VIA: ??
1976 */
1977 uSubLeaf = 0;
1978 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 11, uSubLeaf)) != NULL)
1979 {
1980 if (pCurLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC_ID)
1981 {
1982 uint8_t bLevelType = RT_BYTE2(pCurLeaf->uEcx);
1983 if (bLevelType == 1)
1984 {
1985 /* Thread level - we don't do threads at the moment. */
1986 pCurLeaf->uEax = 0; /** @todo is this correct? Real CPUs never do 0 here, I think... */
1987 pCurLeaf->uEbx = 1;
1988 }
1989 else if (bLevelType == 2)
1990 {
1991 /* Core level. */
1992 pCurLeaf->uEax = 1; /** @todo real CPUs are supposed to be in the 4-6 range, not 1. Our APIC ID assignments are a little special... */
1993#ifdef VBOX_WITH_MULTI_CORE
1994 while (RT_BIT_32(pCurLeaf->uEax) < pVM->cCpus)
1995 pCurLeaf->uEax++;
1996#endif
1997 pCurLeaf->uEbx = pVM->cCpus;
1998 }
1999 else
2000 {
2001 AssertLogRelMsg(bLevelType == 0, ("bLevelType=%#x uSubLeaf=%#x\n", bLevelType, uSubLeaf));
2002 pCurLeaf->uEax = 0;
2003 pCurLeaf->uEbx = 0;
2004 pCurLeaf->uEcx = 0;
2005 }
2006 pCurLeaf->uEcx = (pCurLeaf->uEcx & UINT32_C(0xffffff00)) | (uSubLeaf & 0xff);
2007 pCurLeaf->uEdx = 0; /* APIC ID is filled in by CPUMGetGuestCpuId() at runtime. Init for EMT(0) as usual. */
2008 }
2009 else
2010 {
2011 pCurLeaf->uEax = 0;
2012 pCurLeaf->uEbx = 0;
2013 pCurLeaf->uEcx = 0;
2014 pCurLeaf->uEdx = 0;
2015 }
2016 uSubLeaf++;
2017 }
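 /* Editor's sketch (not part of the original source): how a guest walks the
  * leaf 0xb levels populated above to derive its topology. */
#if 0
 for (uint32_t iLevel = 0; ; iLevel++)
 {
     uint32_t uEax, uEbx, uEcx, uEdx;
     ASMCpuIdExSlow(11, 0, iLevel /*uSubLeaf*/, 0, &uEax, &uEbx, &uEcx, &uEdx);
     uint8_t const bLevelType = RT_BYTE2(uEcx); /* 1=thread, 2=core, 0=end of list. */
     if (bLevelType == 0)
         break;
     uint32_t const cApicIdShift  = uEax & 0x1f;    /* Shift APIC ID right this much for the next level. */
     uint32_t const cUnitsAtLevel = uEbx & 0xffff;  /* Factory-configured units at this level. */
 }
#endif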
2018
2019 /* Cpuid 0xc: Marked as reserved by Intel and AMD.
2020 * We zero this since we don't know what it may have been used for.
2021 */
2022 cpumR3CpuIdZeroLeaf(pCpum, 12);
2023
2024 /* Cpuid 0xd + ECX: Processor Extended State Enumeration
2025 * ECX=0: EAX - Valid bits in XCR0[31:0].
2026 * EBX - Maximum state size as per current XCR0 value.
2027 * ECX - Maximum state size for all supported features.
2028 * EDX - Valid bits in XCR0[63:32].
2029 * ECX=1: EAX - Various X-features.
2030 * EBX - Maximum state size as per current XCR0|IA32_XSS value.
2031 * ECX - Valid bits in IA32_XSS[31:0].
2032 * EDX - Valid bits in IA32_XSS[63:32].
2033 * ECX=N, where N in 2..63 and indicates a bit in XCR0 and/or IA32_XSS,
2034 * if the bit is invalid all four registers are set to zero.
2035 * EAX - The state size for this feature.
2036 * EBX - The state byte offset of this feature.
2037 * ECX - Bit 0 indicates whether this sub-leaf maps to a valid IA32_XSS bit (=1) or a valid XCR0 bit (=0).
2038 * EDX - Reserved, but is set to zero if invalid sub-leaf index.
2039 *
2040 * These are sanitized below; anything beyond what we implement is cleared.
2041 */
2042 /* Figure out the supported XCR0/XSS mask component and make sure CPUID[1].ECX[27] = CR4.OSXSAVE. */
2043 uint64_t fGuestXcr0Mask = 0;
2044 pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
2045 if (pStdFeatureLeaf && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_XSAVE))
2046 {
2047 fGuestXcr0Mask = XSAVE_C_X87 | XSAVE_C_SSE;
2048 if (pStdFeatureLeaf && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_AVX))
2049 fGuestXcr0Mask |= XSAVE_C_YMM;
2050 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 7, 0);
2051 if (pCurLeaf && (pCurLeaf->uEbx & X86_CPUID_STEXT_FEATURE_EBX_AVX512F))
2052 fGuestXcr0Mask |= XSAVE_C_ZMM_16HI | XSAVE_C_ZMM_HI256 | XSAVE_C_OPMASK;
2053 fGuestXcr0Mask &= pCpum->fXStateHostMask;
2054
2055 pStdFeatureLeaf->fFlags |= CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE;
2056 }
2057 pStdFeatureLeaf = NULL;
2058 pCpum->fXStateGuestMask = fGuestXcr0Mask;
2059
2060 /* Work the sub-leaves. */
2061 uint32_t cbXSaveMaxActual = CPUM_MIN_XSAVE_AREA_SIZE;
2062 uint32_t cbXSaveMaxReport = CPUM_MIN_XSAVE_AREA_SIZE;
2063 for (uSubLeaf = 0; uSubLeaf < 63; uSubLeaf++)
2064 {
2065 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, uSubLeaf);
2066 if (pCurLeaf)
2067 {
2068 if (fGuestXcr0Mask)
2069 {
2070 switch (uSubLeaf)
2071 {
2072 case 0:
2073 pCurLeaf->uEax &= RT_LO_U32(fGuestXcr0Mask);
2074 pCurLeaf->uEdx &= RT_HI_U32(fGuestXcr0Mask);
2075 AssertLogRelMsgReturn((pCurLeaf->uEax & (XSAVE_C_X87 | XSAVE_C_SSE)) == (XSAVE_C_X87 | XSAVE_C_SSE),
2076 ("CPUID(0xd/0).EAX missing mandatory X87 or SSE bits: %#RX32", pCurLeaf->uEax),
2077 VERR_CPUM_IPE_1);
2078 cbXSaveMaxActual = pCurLeaf->uEcx;
2079 AssertLogRelMsgReturn(cbXSaveMaxActual <= CPUM_MAX_XSAVE_AREA_SIZE && cbXSaveMaxActual >= CPUM_MIN_XSAVE_AREA_SIZE,
2080 ("%#x max=%#x\n", cbXSaveMaxActual, CPUM_MAX_XSAVE_AREA_SIZE), VERR_CPUM_IPE_2);
2081 AssertLogRelMsgReturn(pCurLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE && pCurLeaf->uEbx <= cbXSaveMaxActual,
2082 ("ebx=%#x cbXSaveMaxActual=%#x\n", pCurLeaf->uEbx, cbXSaveMaxActual),
2083 VERR_CPUM_IPE_2);
2084 continue;
2085 case 1:
2086 pCurLeaf->uEax &= 0;
2087 pCurLeaf->uEcx &= 0;
2088 pCurLeaf->uEdx &= 0;
2089 /** @todo what about checking ebx? */
2090 continue;
2091 default:
2092 if (fGuestXcr0Mask & RT_BIT_64(uSubLeaf))
2093 {
2094 AssertLogRelMsgReturn( pCurLeaf->uEax <= cbXSaveMaxActual
2095 && pCurLeaf->uEax > 0
2096 && pCurLeaf->uEbx < cbXSaveMaxActual
2097 && pCurLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE
2098 && pCurLeaf->uEbx + pCurLeaf->uEax <= cbXSaveMaxActual,
2099 ("%#x: eax=%#x ebx=%#x cbMax=%#x\n",
2100 uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, cbXSaveMaxActual),
2101 VERR_CPUM_IPE_2);
2102 AssertLogRel(!(pCurLeaf->uEcx & 1));
2103 pCurLeaf->uEcx = 0; /* Bit 0 should be zero (XCR0), the rest are reserved... */
2104 pCurLeaf->uEdx = 0; /* it's reserved... */
2105 if (pCurLeaf->uEbx + pCurLeaf->uEax > cbXSaveMaxReport)
2106 cbXSaveMaxReport = pCurLeaf->uEbx + pCurLeaf->uEax;
2107 continue;
2108 }
2109 break;
2110 }
2111 }
2112
2113 /* Clear the leaf. */
2114 pCurLeaf->uEax = 0;
2115 pCurLeaf->uEbx = 0;
2116 pCurLeaf->uEcx = 0;
2117 pCurLeaf->uEdx = 0;
2118 }
2119 }
2120
2121 /* Update the max and current feature sizes to shut up annoying Linux kernels. */
2122 if (cbXSaveMaxReport != cbXSaveMaxActual && fGuestXcr0Mask)
2123 {
2124 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, 0);
2125 if (pCurLeaf)
2126 {
2127 LogRel(("CPUM: Changing leaf 13[0]: EBX=%#RX32 -> %#RX32, ECX=%#RX32 -> %#RX32\n",
2128 pCurLeaf->uEbx, cbXSaveMaxReport, pCurLeaf->uEcx, cbXSaveMaxReport));
2129 pCurLeaf->uEbx = cbXSaveMaxReport;
2130 pCurLeaf->uEcx = cbXSaveMaxReport;
2131 }
2132 }
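 /* Editor's sketch (not part of the original source): a guest sizing its XSAVE
  * area from the leaf 0xd/0 values sanitized above. */
#if 0
 uint32_t uEax, uEbx, uEcx, uEdx;
 ASMCpuIdExSlow(13, 0, 0 /*uSubLeaf*/, 0, &uEax, &uEbx, &uEcx, &uEdx);
 uint32_t const cbAllFeatures = uEcx; /* Size covering all XCR0 features the CPU reports. */
 uint32_t const cbCurrentXcr0 = uEbx; /* Size for the currently enabled XCR0 value. */
#endif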
2133
2134 /* Cpuid 0xe: Marked as reserved by Intel and AMD.
2135 * We zero this since we don't know what it may have been used for.
2136 */
2137 cpumR3CpuIdZeroLeaf(pCpum, 14);
2138
2139 /* Cpuid 0xf + ECX: Platform quality of service monitoring (PQM),
2140 * also known as Intel Resource Director Technology (RDT) Monitoring
2141 * We zero this as we don't currently virtualize PQM.
2142 */
2143 cpumR3CpuIdZeroLeaf(pCpum, 15);
2144
2145 /* Cpuid 0x10 + ECX: Platform quality of service enforcement (PQE),
2146 * also known as Intel Resource Director Technology (RDT) Allocation
2147 * We zero this as we don't currently virtualize PQE.
2148 */
2149 cpumR3CpuIdZeroLeaf(pCpum, 16);
2150
2151 /* Cpuid 0x11: Marked as reserved by Intel and AMD.
2152 * We zero this since we don't know what it may have been used for.
2153 */
2154 cpumR3CpuIdZeroLeaf(pCpum, 17);
2155
2156 /* Cpuid 0x12 + ECX: SGX resource enumeration.
2157 * We zero this as we don't currently virtualize this.
2158 */
2159 cpumR3CpuIdZeroLeaf(pCpum, 18);
2160
2161 /* Cpuid 0x13: Marked as reserved by Intel and AMD.
2162 * We zero this since we don't know what it may have been used for.
2163 */
2164 cpumR3CpuIdZeroLeaf(pCpum, 19);
2165
2166 /* Cpuid 0x14 + ECX: Processor Trace (PT) capability enumeration.
2167 * We zero this as we don't currently virtualize this.
2168 */
2169 cpumR3CpuIdZeroLeaf(pCpum, 20);
2170
2171 /* Cpuid 0x15: Timestamp Counter / Core Crystal Clock info.
2172 * Intel: uTscFrequency = uCoreCrystalClockFrequency * EBX / EAX.
2173 * EAX - denominator (unsigned).
2174 * EBX - numerator (unsigned).
2175 * ECX, EDX - reserved.
2176 * AMD: Reserved / undefined / not implemented.
2177 * VIA: Reserved / undefined / not implemented.
2178 * We zero this as we don't currently virtualize this.
2179 */
2180 cpumR3CpuIdZeroLeaf(pCpum, 21);
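 /* Editor's sketch (not part of the original source): the leaf 0x15 relationship
  * described above, were it virtualized; uCoreCrystalClockHz is a hypothetical name. */
#if 0
 uint64_t const uTscHz = uCoreCrystalClockHz * uEbx / uEax; /* EAX = denominator, EBX = numerator. */
#endif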
2181
2182 /* Cpuid 0x16: Processor frequency info
2183 * Intel: EAX - Core base frequency in MHz.
2184 * EBX - Core maximum frequency in MHz.
2185 * ECX - Bus (reference) frequency in MHz.
2186 * EDX - Reserved.
2187 * AMD: Reserved / undefined / not implemented.
2188 * VIA: Reserved / undefined / not implemented.
2189 * We zero this as we don't currently virtualize this.
2190 */
2191 cpumR3CpuIdZeroLeaf(pCpum, 22);
2192
2193 /* Cpuid 0x17..0x10000000: Unknown.
2194 * We don't know what these mean, so remove them. */
2195 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2196 UINT32_C(0x00000017), UINT32_C(0x0fffffff));
2197
2198
2199 /* CpuId 0x40000000..0x4fffffff: Reserved for hypervisor/emulator.
2200 * We remove all these as we're a hypervisor and must provide our own.
2201 */
2202 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2203 UINT32_C(0x40000000), UINT32_C(0x4fffffff));
2204
2205
2206 /* Cpuid 0x80000000 is harmless. */
2207
2208 /* Cpuid 0x80000001 is handled with cpuid 1 way up above. */
2209
2210 /* Cpuid 0x80000002...0x80000004 contains the processor name and is considered harmless. */
2211
2212 /* Cpuid 0x80000005 & 0x80000006 contain information about L1, L2 & L3 cache and TLB identifiers.
2213 * Safe to pass on to the guest.
2214 *
2215 * AMD: 0x80000005 L1 cache information
2216 * 0x80000006 L2/L3 cache information
2217 * Intel: 0x80000005 reserved
2218 * 0x80000006 L2 cache information
2219 * VIA: 0x80000005 TLB and L1 cache information
2220 * 0x80000006 L2 cache information
2221 */
2222
2223 /* Cpuid 0x800000007: Advanced Power Management Information.
2224 * AMD: EAX: Processor feedback capabilities.
2225 * EBX: RAS capabilities.
2226 * ECX: Advanced power monitoring interface.
2227 * EDX: Enhanced power management capabilities.
2228 * Intel: EAX, EBX, ECX - reserved.
2229 * EDX - Invariant TSC indicator supported (bit 8), the rest is reserved.
2230 * VIA: Reserved
2231 * We let the guest see EDX_TSCINVAR (and later maybe EDX_EFRO). Actually, we should set EDX_TSCINVAR.
2232 */
2233 uSubLeaf = 0;
2234 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000007), uSubLeaf)) != NULL)
2235 {
2236 pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0;
2237 if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
2238 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
2239 {
2240 /*
2241 * Older 64-bit linux kernels blindly assume that the AMD performance counters work
2242 * if X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR is set, see @bugref{7243#c85}. Exposing this
2243 * bit is now configurable.
2244 */
2245 pCurLeaf->uEdx &= 0
2246 //| X86_CPUID_AMD_ADVPOWER_EDX_TS
2247 //| X86_CPUID_AMD_ADVPOWER_EDX_FID
2248 //| X86_CPUID_AMD_ADVPOWER_EDX_VID
2249 //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
2250 //| X86_CPUID_AMD_ADVPOWER_EDX_TM
2251 //| X86_CPUID_AMD_ADVPOWER_EDX_STC
2252 //| X86_CPUID_AMD_ADVPOWER_EDX_MC
2253 //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
2254 | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
2255 //| X86_CPUID_AMD_ADVPOWER_EDX_CPB RT_BIT(9)
2256 //| X86_CPUID_AMD_ADVPOWER_EDX_EFRO RT_BIT(10)
2257 //| X86_CPUID_AMD_ADVPOWER_EDX_PFI RT_BIT(11)
2258 //| X86_CPUID_AMD_ADVPOWER_EDX_PA RT_BIT(12)
2259 | 0;
2260 }
2261 else
2262 pCurLeaf->uEdx &= X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR;
2263 if (!pConfig->fInvariantTsc)
2264 pCurLeaf->uEdx &= ~X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR;
2265 uSubLeaf++;
2266 }
2267
2268 /* Cpuid 0x80000008:
2269 * AMD: EBX, EDX - reserved
2270 * EAX: Virtual/Physical/Guest address Size
2271 * ECX: Number of cores + APICIdCoreIdSize
2272 * Intel: EAX: Virtual/Physical address Size
2273 * EBX, ECX, EDX - reserved
2274 * VIA: EAX: Virtual/Physical address Size
2275 * EBX, ECX, EDX - reserved
2276 *
2277 * We only expose the virtual+physical address size to the guest atm.
2278 * On AMD we set the core count, but not the apic id stuff as we're
2279 * currently not doing the apic id assignments in a compatible manner.
2280 */
2281 uSubLeaf = 0;
2282 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000008), uSubLeaf)) != NULL)
2283 {
2284 pCurLeaf->uEax &= UINT32_C(0x0000ffff); /* Virtual & physical address sizes only. */
2285 pCurLeaf->uEbx = 0; /* reserved - [12] == IBPB */
2286 pCurLeaf->uEdx = 0; /* reserved */
2287
2288 /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu).
2289 * Set core count to 0, indicating 1 core. Adjust if we're in multi core mode on AMD. */
2290 pCurLeaf->uEcx = 0;
2291#ifdef VBOX_WITH_MULTI_CORE
2292 if ( pVM->cCpus > 1
2293 && ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
2294 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
2295 pCurLeaf->uEcx |= (pVM->cCpus - 1) & UINT32_C(0xff);
2296#endif
2297 uSubLeaf++;
2298 }
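 /* Editor's sketch (not part of the original source): decoding the 0x80000008
  * EAX address-size fields passed thru above. */
#if 0
 uint32_t uEax, uEbx, uEcx, uEdx;
 ASMCpuIdExSlow(UINT32_C(0x80000008), 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
 uint32_t const cPhysAddrBits = uEax & 0xff;        /* EAX[7:0]  */
 uint32_t const cVirtAddrBits = (uEax >> 8) & 0xff; /* EAX[15:8] */
#endif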
2299
2300 /* Cpuid 0x80000009: Reserved
2301 * We zero this since we don't know what it may have been used for.
2302 */
2303 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x80000009));
2304
2305 /* Cpuid 0x8000000a: SVM information on AMD, invalid on Intel.
2306 * AMD: EAX - SVM revision.
2307 * EBX - Number of ASIDs.
2308 * ECX - Reserved.
2309 * EDX - SVM Feature identification.
2310 */
2311 if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
2312 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
2313 {
2314 pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0);
2315 if ( pExtFeatureLeaf
2316 && (pExtFeatureLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM))
2317 {
2318 PCPUMCPUIDLEAF pSvmFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0x8000000a, 0);
2319 if (pSvmFeatureLeaf)
2320 {
2321 pSvmFeatureLeaf->uEax = 0x1;
2322 pSvmFeatureLeaf->uEbx = 0x8000; /** @todo figure out virtual NASID. */
2323 pSvmFeatureLeaf->uEcx = 0;
2324 pSvmFeatureLeaf->uEdx &= ( X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE /** @todo Support other SVM features */
2325 | X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID
2326 | X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS);
2327 }
2328 else
2329 {
2330 /* Should never happen. */
2331 LogRel(("CPUM: Warning! Expected CPUID leaf 0x8000000a not present! SVM features not exposed to the guest\n"));
2332 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
2333 }
2334 }
2335 else
2336 {
2337 /* If SVM is not supported, this is reserved, zero out. */
2338 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
2339 }
2340 }
2341 else
2342 {
2343 /* Cpuid 0x8000000a: Reserved on Intel.
2344 * We zero this since we don't know what it may have been used for.
2345 */
2346 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
2347 }
2348
2349 /* Cpuid 0x8000000b thru 0x80000018: Reserved
2350 * We clear these as we don't know what purpose they might have. */
2351 for (uint32_t uLeaf = UINT32_C(0x8000000b); uLeaf <= UINT32_C(0x80000018); uLeaf++)
2352 cpumR3CpuIdZeroLeaf(pCpum, uLeaf);
2353
2354 /* Cpuid 0x80000019: TLB configuration
2355 * Seems to be harmless, pass them thru as is. */
2356
2357 /* Cpuid 0x8000001a: Performance optimization identifiers.
2358 * Strip anything we don't recognize or that covers features we don't implement. */
2359 uSubLeaf = 0;
2360 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001a), uSubLeaf)) != NULL)
2361 {
2362 pCurLeaf->uEax &= RT_BIT_32(0) /* FP128 - use 1x128-bit instead of 2x64-bit. */
2363 | RT_BIT_32(1) /* MOVU - Prefer unaligned MOV over MOVL + MOVH. */
2364 //| RT_BIT_32(2) /* FP256 - use 1x256-bit instead of 2x128-bit. */
2365 ;
2366 pCurLeaf->uEbx = 0; /* reserved */
2367 pCurLeaf->uEcx = 0; /* reserved */
2368 pCurLeaf->uEdx = 0; /* reserved */
2369 uSubLeaf++;
2370 }
2371
2372 /* Cpuid 0x8000001b: Instruction based sampling (IBS) information.
2373 * Clear this as we don't currently virtualize this feature. */
2374 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000001b));
2375
2376 /* Cpuid 0x8000001c: Lightweight profiling (LWP) information.
2377 * Clear this as we don't currently virtualize this feature. */
2378 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000001c));
2379
2380 /* Cpuid 0x8000001d+ECX: Get cache configuration descriptors.
2381 * We need to sanitize the cores per cache (EAX[25:14]).
2382 *
2383 * This is very much the same as Intel's CPUID(4) leaf, except EAX[31:26]
2384 * and EDX[2] are reserved here, and EAX[25:14] is documented as having a
2385 * slightly different meaning.
2386 */
2387 uSubLeaf = 0;
2388 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001d), uSubLeaf)) != NULL)
2389 {
2390#ifdef VBOX_WITH_MULTI_CORE
2391 uint32_t cCores = ((pCurLeaf->uEax >> 14) & 0xfff) + 1;
2392 if (cCores > pVM->cCpus)
2393 cCores = pVM->cCpus;
2394 pCurLeaf->uEax &= UINT32_C(0x00003fff);
2395 pCurLeaf->uEax |= ((cCores - 1) & 0xfff) << 14;
2396#else
2397 pCurLeaf->uEax &= UINT32_C(0x00003fff);
2398#endif
2399 uSubLeaf++;
2400 }
2401
2402 /* Cpuid 0x8000001e: Get APIC / unit / node information.
2403 * If AMD, we configure it for our layout (on EMT(0)). In the multi-core
2404 * setup, we have one compute unit with all the cores in it. Single node.
2405 */
2406 uSubLeaf = 0;
2407 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001e), uSubLeaf)) != NULL)
2408 {
2409 pCurLeaf->uEax = 0; /* Extended APIC ID = EMT(0).idApic (== 0). */
2410 if (pCurLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC_ID)
2411 {
2412#ifdef VBOX_WITH_MULTI_CORE
2413 pCurLeaf->uEbx = pVM->cCpus < 0x100
2414 ? (pVM->cCpus - 1) << 8 : UINT32_C(0x0000ff00); /* Compute unit ID 0, cores per unit. */
2415#else
2416 pCurLeaf->uEbx = 0; /* Compute unit ID 0, 1 core per unit. */
2417#endif
2418 pCurLeaf->uEcx = 0; /* Node ID 0, 1 node per CPU. */
2419 }
2420 else
2421 {
2422 Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_AMD);
2423 Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_HYGON);
2424 pCurLeaf->uEbx = 0; /* Reserved. */
2425 pCurLeaf->uEcx = 0; /* Reserved. */
2426 }
2427 pCurLeaf->uEdx = 0; /* Reserved. */
2428 uSubLeaf++;
2429 }
2430
2431 /* Cpuid 0x8000001f...0x8ffffffd: Unknown.
2432 * We don't know what these mean, so remove them. */
2433 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2434 UINT32_C(0x8000001f), UINT32_C(0x8ffffffd));
2435
2436 /* Cpuid 0x8ffffffe: Mystery AMD K6 leaf.
2437 * Just pass it thru for now. */
2438
2439 /* Cpuid 0x8fffffff: Mystery hammer time leaf!
2440 * Just pass it thru for now. */
2441
2442 /* Cpuid 0xc0000000: Centaur stuff.
2443 * Harmless, pass it thru. */
2444
2445 /* Cpuid 0xc0000001: Centaur features.
2446 * VIA: EAX - Family, model, stepping.
2447 * EDX - Centaur extended feature flags. Nothing interesting, except maybe
2448 * FEMMS (bit 5), but VIA marks it as 'reserved', so never mind.
2449 * EBX, ECX - reserved.
2450 * We keep EAX but strip the rest.
2451 */
2452 uSubLeaf = 0;
2453 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000001), uSubLeaf)) != NULL)
2454 {
2455 pCurLeaf->uEbx = 0;
2456 pCurLeaf->uEcx = 0;
2457 pCurLeaf->uEdx = 0; /* Bits 0 thru 9 are documented on sandpile.org, but we don't want them, except maybe 5 (FEMMS). */
2458 uSubLeaf++;
2459 }
2460
2461 /* Cpuid 0xc0000002: Old Centaur Current Performance Data.
2462 * We only have fixed stale values, but should be harmless. */
2463
2464 /* Cpuid 0xc0000003: Reserved.
2465 * We zero this since we don't know what it may have been used for.
2466 */
2467 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0xc0000003));
2468
2469 /* Cpuid 0xc0000004: Centaur Performance Info.
2470 * We only have fixed stale values, but should be harmless. */
2471
2472
2473 /* Cpuid 0xc0000005...0xcfffffff: Unknown.
2474 * We don't know what these mean, so remove them. */
2475 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2476 UINT32_C(0xc0000005), UINT32_C(0xcfffffff));
2477
2478 return VINF_SUCCESS;
2479#undef PORTABLE_DISABLE_FEATURE_BIT
2480#undef PORTABLE_CLEAR_BITS_WHEN
2481}
2482
2483
2484/**
2485 * Reads a value in /CPUM/IsaExts/ node.
2486 *
2487 * @returns VBox status code (error message raised).
2488 * @param pVM The cross context VM structure. (For errors.)
2489 * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
2490 * @param pszValueName The value / extension name.
2491 * @param penmValue Where to return the choice.
2492 * @param enmDefault The default choice.
2493 */
2494static int cpumR3CpuIdReadIsaExtCfg(PVM pVM, PCFGMNODE pIsaExts, const char *pszValueName,
2495 CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault)
2496{
2497 /*
2498 * Try integer encoding first.
2499 */
2500 uint64_t uValue;
2501 int rc = CFGMR3QueryInteger(pIsaExts, pszValueName, &uValue);
2502 if (RT_SUCCESS(rc))
2503 switch (uValue)
2504 {
2505 case 0: *penmValue = CPUMISAEXTCFG_DISABLED; break;
2506 case 1: *penmValue = CPUMISAEXTCFG_ENABLED_SUPPORTED; break;
2507 case 2: *penmValue = CPUMISAEXTCFG_ENABLED_ALWAYS; break;
2508 case 9: *penmValue = CPUMISAEXTCFG_ENABLED_PORTABLE; break;
2509 default:
2510 return VMSetError(pVM, VERR_CPUM_INVALID_CONFIG_VALUE, RT_SRC_POS,
2511 "Invalid config value for '/CPUM/IsaExts/%s': %llu (expected 0/'disabled', 1/'enabled', 2/'portable', or 9/'forced')",
2512 pszValueName, uValue);
2513 }
2514 /*
2515 * If missing, use default.
2516 */
2517 else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
2518 *penmValue = enmDefault;
2519 else
2520 {
2521 if (rc == VERR_CFGM_NOT_INTEGER)
2522 {
2523 /*
2524 * Not an integer, try read it as a string.
2525 */
2526 char szValue[32];
2527 rc = CFGMR3QueryString(pIsaExts, pszValueName, szValue, sizeof(szValue));
2528 if (RT_SUCCESS(rc))
2529 {
2530 RTStrToLower(szValue);
2531 size_t cchValue = strlen(szValue);
2532#define EQ(a_str) (cchValue == sizeof(a_str) - 1U && !memcmp(szValue, a_str, sizeof(a_str) - 1U))
2533 if ( EQ("disabled") || EQ("disable") || EQ("off") || EQ("no"))
2534 *penmValue = CPUMISAEXTCFG_DISABLED;
2535 else if (EQ("enabled") || EQ("enable") || EQ("on") || EQ("yes"))
2536 *penmValue = CPUMISAEXTCFG_ENABLED_SUPPORTED;
2537 else if (EQ("forced") || EQ("force") || EQ("always"))
2538 *penmValue = CPUMISAEXTCFG_ENABLED_ALWAYS;
2539 else if (EQ("portable"))
2540 *penmValue = CPUMISAEXTCFG_ENABLED_PORTABLE;
2541 else if (EQ("default") || EQ("def"))
2542 *penmValue = enmDefault;
2543 else
2544 return VMSetError(pVM, VERR_CPUM_INVALID_CONFIG_VALUE, RT_SRC_POS,
2545 "Invalid config value for '/CPUM/IsaExts/%s': '%s' (expected 0/'disabled', 1/'enabled', 2/'portable', or 9/'forced')",
2546 pszValueName, uValue);
2547#undef EQ
2548 }
2549 }
2550 if (RT_FAILURE(rc))
2551 return VMSetError(pVM, rc, RT_SRC_POS, "Error reading config value '/CPUM/IsaExts/%s': %Rrc", pszValueName, rc);
2552 }
2553 return VINF_SUCCESS;
2554}
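/* Editor's note (not part of the original source): these values normally reach the
 * reader above through the CFGM tree; one hedged usage illustration is per-VM
 * extradata along the lines of:
 *     VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/IsaExts/RDRAND" 0
 * (exact key form per the CFGM/extradata documentation). */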
2555
2556
2557/**
2558 * Reads a value in /CPUM/IsaExts/ node, forcing it to DISABLED if wanted.
2559 *
2560 * @returns VBox status code (error message raised).
2561 * @param pVM The cross context VM structure. (For errors.)
2562 * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
2563 * @param pszValueName The value / extension name.
2564 * @param penmValue Where to return the choice.
2565 * @param enmDefault The default choice.
2566 * @param fAllowed Allowed choice. Applied both to the result and to
2567 * the default value.
2568 */
2569static int cpumR3CpuIdReadIsaExtCfgEx(PVM pVM, PCFGMNODE pIsaExts, const char *pszValueName,
2570 CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault, bool fAllowed)
2571{
2572 int rc;
2573 if (fAllowed)
2574 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, enmDefault);
2575 else
2576 {
2577 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, false /*enmDefault*/);
2578 if (RT_SUCCESS(rc) && *penmValue == CPUMISAEXTCFG_ENABLED_ALWAYS)
2579 LogRel(("CPUM: Ignoring forced '%s'\n", pszValueName));
2580 *penmValue = CPUMISAEXTCFG_DISABLED;
2581 }
2582 return rc;
2583}
2584
2585
2586/**
2587 * Reads a value in /CPUM/IsaExts/ node that used to be located in /CPUM/.
2588 *
2589 * @returns VBox status code (error message raised).
2590 * @param pVM The cross context VM structure. (For errors.)
2591 * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
2592 * @param pCpumCfg The /CPUM node (can be NULL).
2593 * @param pszValueName The value / extension name.
2594 * @param penmValue Where to return the choice.
2595 * @param enmDefault The default choice.
2596 */
2597static int cpumR3CpuIdReadIsaExtCfgLegacy(PVM pVM, PCFGMNODE pIsaExts, PCFGMNODE pCpumCfg, const char *pszValueName,
2598 CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault)
2599{
2600 if (CFGMR3Exists(pCpumCfg, pszValueName))
2601 {
2602 if (!CFGMR3Exists(pIsaExts, pszValueName))
2603 LogRel(("Warning: /CPUM/%s is deprecated, use /CPUM/IsaExts/%s instead.\n", pszValueName, pszValueName));
2604 else
2605 return VMSetError(pVM, VERR_DUPLICATE, RT_SRC_POS,
2606 "Duplicate config values '/CPUM/%s' and '/CPUM/IsaExts/%s' - please remove the former!",
2607 pszValueName, pszValueName);
2608
2609 bool fLegacy;
2610 int rc = CFGMR3QueryBoolDef(pCpumCfg, pszValueName, &fLegacy, enmDefault != CPUMISAEXTCFG_DISABLED);
2611 if (RT_SUCCESS(rc))
2612 {
2613 *penmValue = fLegacy ? CPUMISAEXTCFG_ENABLED_SUPPORTED : CPUMISAEXTCFG_DISABLED;
2614 return VINF_SUCCESS;
2615 }
2616 return VMSetError(pVM, VERR_DUPLICATE, RT_SRC_POS, "Error querying '/CPUM/%s': %Rrc", pszValueName, rc);
2617 }
2618
2619 return cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, enmDefault);
2620}
2621
2622
2623static int cpumR3CpuIdReadConfig(PVM pVM, PCPUMCPUIDCONFIG pConfig, PCFGMNODE pCpumCfg, bool fNestedPagingAndFullGuestExec)
2624{
2625 int rc;
2626
2627 /** @cfgm{/CPUM/PortableCpuIdLevel, 8-bit, 0, 3, 0}
2628 * When non-zero CPUID features that could cause portability issues will be
2629 * stripped. The higher the value the more features gets stripped. Higher
2630 * values should only be used when older CPUs are involved since it may
2631 * harm performance and maybe also cause problems with specific guests. */
2632 rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pVM->cpum.s.u8PortableCpuIdLevel, 0);
2633 AssertLogRelRCReturn(rc, rc);
2634
2635 /** @cfgm{/CPUM/GuestCpuName, string}
2636 * The name of the CPU we're to emulate. The default is the host CPU.
2637 * Note! CPUs other than "host" are currently unsupported. */
2638 rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", pConfig->szCpuName, sizeof(pConfig->szCpuName), "host");
2639 AssertLogRelRCReturn(rc, rc);
2640
2641 /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
2642 * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
2643 * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
2644 * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
2645 */
2646 rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &pConfig->fNt4LeafLimit, false);
2647 AssertLogRelRCReturn(rc, rc);
2648
2649 /** @cfgm{/CPUM/InvariantTsc, boolean, true}
2650 * Pass-through the invariant TSC flag in 0x80000007 if available on the host
2651 * CPU. On AMD CPUs, users may wish to suppress it to avoid trouble from older
2652 * 64-bit linux guests which assume the presence of AMD performance counters
2653 * that we do not virtualize.
2654 */
2655 rc = CFGMR3QueryBoolDef(pCpumCfg, "InvariantTsc", &pConfig->fInvariantTsc, true);
2656 AssertLogRelRCReturn(rc, rc);
2657
2658 /** @cfgm{/CPUM/ForceVme, boolean, false}
2659 * Always expose the VME (Virtual-8086 Mode Extensions) capability if true.
2660 * By default the flag is passed thru as is from the host CPU, except
2661 * on AMD Ryzen CPUs where it's masked to avoid trouble with XP/Server 2003
2662 * guests and DOS boxes in general.
2663 */
2664 rc = CFGMR3QueryBoolDef(pCpumCfg, "ForceVme", &pConfig->fForceVme, false);
2665 AssertLogRelRCReturn(rc, rc);
2666
2667 /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
2668 * Restrict the reported CPU family+model+stepping of intel CPUs. This is
2669 * probably going to be a temporary hack, so don't depend on this.
2670 * The 1st byte of the value is the stepping, the 2nd byte the model
2671 * number, the 3rd byte the family, and the 4th byte must be zero.
2672 */
2673 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &pConfig->uMaxIntelFamilyModelStep, UINT32_MAX);
2674 AssertLogRelRCReturn(rc, rc);
2675
2676 /** @cfgm{/CPUM/MaxStdLeaf, uint32_t, 0x00000016}
2677 * The last standard leaf to keep. The actual last value that is stored in EAX
2678 * is RT_MAX(CPUID[0].EAX,/CPUM/MaxStdLeaf). Leaves beyond the max leaf are
2679 * removed. (This works independently of and differently from NT4LeafLimit.)
2680 * The default is usually set to what we're able to reasonably sanitize.
2681 */
2682 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxStdLeaf", &pConfig->uMaxStdLeaf, UINT32_C(0x00000016));
2683 AssertLogRelRCReturn(rc, rc);
2684
2685 /** @cfgm{/CPUM/MaxExtLeaf, uint32_t, 0x8000001e}
2686 * The last extended leaf to keep. The actual last value that is stored in EAX
2687 * is RT_MAX(CPUID[0x80000000].EAX,/CPUM/MaxExtLeaf). Leaves beyond the max
2688 * leaf are removed. The default is set to what we're able to sanitize.
2689 */
2690 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxExtLeaf", &pConfig->uMaxExtLeaf, UINT32_C(0x8000001e));
2691 AssertLogRelRCReturn(rc, rc);
2692
2693 /** @cfgm{/CPUM/MaxCentaurLeaf, uint32_t, 0xc0000004}
2694 * The last Centaur leaf to keep. The actual last value that is stored in EAX
2695 * is RT_MAX(CPUID[0xc0000000].EAX,/CPUM/MaxCentaurLeaf). Leaves beyond the max
2696 * leaf are removed. The default is set to what we're able to sanitize.
2697 */
2698 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxCentaurLeaf", &pConfig->uMaxCentaurLeaf, UINT32_C(0xc0000004));
2699 AssertLogRelRCReturn(rc, rc);
2700
2701 bool fQueryNestedHwvirt = false
2702#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2703 || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
2704 || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON
2705#endif
2706#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2707 || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
2708 || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA
2709#endif
2710 ;
2711 if (fQueryNestedHwvirt)
2712 {
2713 /** @cfgm{/CPUM/NestedHWVirt, bool, false}
2714 * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
2715 * The default is false, and when enabled requires a 64-bit CPU with support for
2716 * nested-paging and AMD-V or unrestricted guest mode.
2717 */
2718 rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
2719 AssertLogRelRCReturn(rc, rc);
2720 if (pConfig->fNestedHWVirt)
2721 {
2722 /** @todo Think about enabling this later with NEM/KVM. */
2723 if (VM_IS_NEM_ENABLED(pVM))
2724 {
2725 LogRel(("CPUM: Warning! Can't turn on nested VT-x/AMD-V when NEM is used! (later)\n"));
2726 pConfig->fNestedHWVirt = false;
2727 }
2728 else if (!fNestedPagingAndFullGuestExec)
2729 return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
2730 "Cannot enable nested VT-x/AMD-V without nested-paging and unrestricted guest execution!\n");
2731 }
2732 }
2733
2734 /*
2735 * Instruction Set Architecture (ISA) Extensions.
2736 */
2737 PCFGMNODE pIsaExts = CFGMR3GetChild(pCpumCfg, "IsaExts");
2738 if (pIsaExts)
2739 {
2740 rc = CFGMR3ValidateConfig(pIsaExts, "/CPUM/IsaExts/",
2741 "CMPXCHG16B"
2742 "|MONITOR"
2743 "|MWaitExtensions"
2744 "|SSE4.1"
2745 "|SSE4.2"
2746 "|XSAVE"
2747 "|AVX"
2748 "|AVX2"
2749 "|AESNI"
2750 "|PCLMUL"
2751 "|POPCNT"
2752 "|MOVBE"
2753 "|RDRAND"
2754 "|RDSEED"
2755 "|ADX"
2756 "|CLFLUSHOPT"
2757 "|SHA"
2758 "|FSGSBASE"
2759 "|PCID"
2760 "|INVPCID"
2761 "|FlushCmdMsr"
2762 "|ABM"
2763 "|SSE4A"
2764 "|MISALNSSE"
2765 "|3DNOWPRF"
2766 "|AXMMX"
2767 , "" /*pszValidNodes*/, "CPUM" /*pszWho*/, 0 /*uInstance*/);
2768 if (RT_FAILURE(rc))
2769 return rc;
2770 }
2771
2772 /** @cfgm{/CPUM/IsaExts/CMPXCHG16B, boolean, true}
2773 * Expose CMPXCHG16B to the guest if available. All host CPUs which support
2774 * hardware virtualization have it.
2775 */
2776 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "CMPXCHG16B", &pConfig->enmCmpXchg16b, true);
2777 AssertLogRelRCReturn(rc, rc);
2778
2779 /** @cfgm{/CPUM/IsaExts/MONITOR, boolean, true}
2780 * Expose MONITOR/MWAIT instructions to the guest.
2781 */
2782 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "MONITOR", &pConfig->enmMonitor, true);
2783 AssertLogRelRCReturn(rc, rc);
2784
2785 /** @cfgm{/CPUM/IsaExts/MWaitExtensions, boolean, false}
2786 * Expose MWAIT extended features to the guest. For now we expose just MWAIT
2787 * break on interrupt feature (bit 1).
2788 */
2789 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "MWaitExtensions", &pConfig->enmMWaitExtensions, false);
2790 AssertLogRelRCReturn(rc, rc);
2791
2792 /** @cfgm{/CPUM/IsaExts/SSE4.1, boolean, true}
2793 * Expose SSE4.1 to the guest if available.
2794 */
2795 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "SSE4.1", &pConfig->enmSse41, true);
2796 AssertLogRelRCReturn(rc, rc);
2797
2798 /** @cfgm{/CPUM/IsaExts/SSE4.2, boolean, true}
2799 * Expose SSE4.2 to the guest if available.
2800 */
2801 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "SSE4.2", &pConfig->enmSse42, true);
2802 AssertLogRelRCReturn(rc, rc);
2803
2804 bool const fMayHaveXSave = pVM->cpum.s.HostFeatures.fXSaveRstor
2805 && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor
2806 && ( VM_IS_NEM_ENABLED(pVM)
2807 ? NEMHCGetFeatures(pVM) & NEM_FEAT_F_XSAVE_XRSTOR
2808 : VM_IS_EXEC_ENGINE_IEM(pVM)
2809 ? false /** @todo IEM and XSAVE @bugref{9898} */
2810 : fNestedPagingAndFullGuestExec);
2811 uint64_t const fXStateHostMask = pVM->cpum.s.fXStateHostMask;
2812
2813 /** @cfgm{/CPUM/IsaExts/XSAVE, boolean, depends}
2814 * Expose XSAVE/XRSTOR to the guest if available. For the time being the
2815 * default is to only expose this to VMs with nested paging and AMD-V or
2816 * unrestricted guest execution mode. Not possible to force this one without
2817 * host support at the moment.
2818 */
2819 rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "XSAVE", &pConfig->enmXSave, fNestedPagingAndFullGuestExec,
2820 fMayHaveXSave /*fAllowed*/);
2821 AssertLogRelRCReturn(rc, rc);
2822
2823 /** @cfgm{/CPUM/IsaExts/AVX, boolean, depends}
2824 * Expose the AVX instruction set extensions to the guest if available and
2825 * XSAVE is exposed too. For the time being the default is to only expose this
2826 * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
2827 */
2828 rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX", &pConfig->enmAvx, fNestedPagingAndFullGuestExec,
2829 fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
2830 AssertLogRelRCReturn(rc, rc);
2831
2832 /** @cfgm{/CPUM/IsaExts/AVX2, boolean, depends}
2833 * Expose the AVX2 instruction set extensions to the guest if available and
2834 * XSAVE is exposed too. For the time being the default is to only expose this
2835 * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
2836 */
2837 rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX2", &pConfig->enmAvx2, fNestedPagingAndFullGuestExec /* temporarily */,
2838 fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
2839 AssertLogRelRCReturn(rc, rc);
2840
2841 /** @cfgm{/CPUM/IsaExts/AESNI, isaextcfg, depends}
2842 * Whether to expose the AES instructions to the guest. For the time being the
2843 * default is to only do this for VMs with nested paging and AMD-V or
2844 * unrestricted guest mode.
2845 */
2846 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "AESNI", &pConfig->enmAesNi, fNestedPagingAndFullGuestExec);
2847 AssertLogRelRCReturn(rc, rc);
2848
2849 /** @cfgm{/CPUM/IsaExts/PCLMUL, isaextcfg, depends}
2850 * Whether to expose the PCLMULQDQ instructions to the guest. For the time
2851 * being the default is to only do this for VMs with nested paging and AMD-V or
2852 * unrestricted guest mode.
2853 */
2854 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "PCLMUL", &pConfig->enmPClMul, fNestedPagingAndFullGuestExec);
2855 AssertLogRelRCReturn(rc, rc);
2856
2857 /** @cfgm{/CPUM/IsaExts/POPCNT, isaextcfg, true}
2858 * Whether to expose the POPCNT instructions to the guest.
2859 */
2860 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "POPCNT", &pConfig->enmPopCnt, CPUMISAEXTCFG_ENABLED_SUPPORTED);
2861 AssertLogRelRCReturn(rc, rc);
2862
2863 /** @cfgm{/CPUM/IsaExts/MOVBE, isaextcfg, depends}
2864 * Whether to expose the MOVBE instructions to the guest. For the time
2865 * being the default is to only do this for VMs with nested paging and AMD-V or
2866 * unrestricted guest mode.
2867 */
2868 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MOVBE", &pConfig->enmMovBe, fNestedPagingAndFullGuestExec);
2869 AssertLogRelRCReturn(rc, rc);
2870
2871 /** @cfgm{/CPUM/IsaExts/RDRAND, isaextcfg, depends}
2872 * Whether to expose the RDRAND instructions to the guest. For the time being
2873 * the default is to only do this for VMs with nested paging and AMD-V or
2874 * unrestricted guest mode.
2875 */
2876 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "RDRAND", &pConfig->enmRdRand, fNestedPagingAndFullGuestExec);
2877 AssertLogRelRCReturn(rc, rc);
2878
2879 /** @cfgm{/CPUM/IsaExts/RDSEED, isaextcfg, depends}
2880 * Whether to expose the RDSEED instructions to the guest. For the time being
2881 * the default is to only do this for VMs with nested paging and AMD-V or
2882 * unrestricted guest mode.
2883 */
2884 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "RDSEED", &pConfig->enmRdSeed, fNestedPagingAndFullGuestExec);
2885 AssertLogRelRCReturn(rc, rc);
2886
2887 /** @cfgm{/CPUM/IsaExts/ADX, isaextcfg, depends}
2888 * Whether to expose the ADX instructions to the guest. For the time being
2889 * the default is to only do this for VMs with nested paging and AMD-V or
2890 * unrestricted guest mode.
2891 */
2892 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ADX", &pConfig->enmAdx, fNestedPagingAndFullGuestExec);
2893 AssertLogRelRCReturn(rc, rc);
2894
2895 /** @cfgm{/CPUM/IsaExts/CLFLUSHOPT, isaextcfg, depends}
2896 * Whether to expose the CLFLUSHOPT instructions to the guest. For the time
2897 * being the default is to only do this for VMs with nested paging and AMD-V or
2898 * unrestricted guest mode.
2899 */
2900 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "CLFLUSHOPT", &pConfig->enmCLFlushOpt, fNestedPagingAndFullGuestExec);
2901 AssertLogRelRCReturn(rc, rc);
2902
2903 /** @cfgm{/CPUM/IsaExts/SHA, isaextcfg, depends}
2904 * Whether to expose the SHA instructions to the guest. For the time being
2905 * the default is to only do this for VMs with nested paging and AMD-V or
2906 * unrestricted guest mode.
2907 */
2908 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SHA", &pConfig->enmSha, fNestedPagingAndFullGuestExec);
2909 AssertLogRelRCReturn(rc, rc);
2910
2911 /** @cfgm{/CPUM/IsaExts/FSGSBASE, isaextcfg, true}
2912 * Whether to expose the read/write FSGSBASE instructions to the guest.
2913 */
2914 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "FSGSBASE", &pConfig->enmFsGsBase, true);
2915 AssertLogRelRCReturn(rc, rc);
2916
2917 /** @cfgm{/CPUM/IsaExts/PCID, isaextcfg, true}
2918 * Whether to expose the PCID feature to the guest.
2919 */
2920 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "PCID", &pConfig->enmPcid, pConfig->enmFsGsBase);
2921 AssertLogRelRCReturn(rc, rc);
2922
2923 /** @cfgm{/CPUM/IsaExts/INVPCID, isaextcfg, true}
2924 * Whether to expose the INVPCID instruction to the guest.
2925 */
2926 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "INVPCID", &pConfig->enmInvpcid, pConfig->enmFsGsBase);
2927 AssertLogRelRCReturn(rc, rc);
2928
2929 /** @cfgm{/CPUM/IsaExts/FlushCmdMsr, isaextcfg, true}
2930 * Whether to expose the IA32_FLUSH_CMD MSR to the guest.
2931 */
2932 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "FlushCmdMsr", &pConfig->enmFlushCmdMsr, CPUMISAEXTCFG_ENABLED_SUPPORTED);
2933 AssertLogRelRCReturn(rc, rc);
2934
2935 /** @cfgm{/CPUM/IsaExts/MdsClear, isaextcfg, true}
2936 * Whether to advertise the VERW and MDS related IA32_FLUSH_CMD MSR bits to
2937 * the guest. Requires FlushCmdMsr to be present too.
2938 */
2939 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MdsClear", &pConfig->enmMdsClear, CPUMISAEXTCFG_ENABLED_SUPPORTED);
2940 AssertLogRelRCReturn(rc, rc);
2941
2942 /** @cfgm{/CPUM/IsaExts/ArchCapMsr, isaextcfg, true}
2943 * Whether to expose the MSR_IA32_ARCH_CAPABILITIES MSR to the guest.
2944 */
2945 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ArchCapMsr", &pConfig->enmArchCapMsr, CPUMISAEXTCFG_ENABLED_SUPPORTED);
2946 AssertLogRelRCReturn(rc, rc);
2947
2948
2949 /* AMD: */
2950
2951 /** @cfgm{/CPUM/IsaExts/ABM, isaextcfg, true}
2952 * Whether to expose the AMD ABM instructions to the guest.
2953 */
2954 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ABM", &pConfig->enmAbm, CPUMISAEXTCFG_ENABLED_SUPPORTED);
2955 AssertLogRelRCReturn(rc, rc);
2956
2957 /** @cfgm{/CPUM/IsaExts/SSE4A, isaextcfg, depends}
2958 * Whether to expose the AMD SSE4A instructions to the guest. For the time
2959 * being the default is to only do this for VMs with nested paging and AMD-V or
2960 * unrestricted guest mode.
2961 */
2962 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SSE4A", &pConfig->enmSse4A, fNestedPagingAndFullGuestExec);
2963 AssertLogRelRCReturn(rc, rc);
2964
2965 /** @cfgm{/CPUM/IsaExts/MISALNSSE, isaextcfg, depends}
2966 * Whether to expose the AMD MisAlSse feature (MXCSR flag 17) to the guest. For
2967 * the time being the default is to only do this for VMs with nested paging and
2968 * AMD-V or unrestricted guest mode.
2969 */
2970 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MISALNSSE", &pConfig->enmMisAlnSse, fNestedPagingAndFullGuestExec);
2971 AssertLogRelRCReturn(rc, rc);
2972
2973 /** @cfgm{/CPUM/IsaExts/3DNOWPRF, isaextcfg, depends}
2974 * Whether to expose the AMD 3D Now! prefetch instructions to the guest.
2975 * For the time being the default is to only do this for VMs with nested paging
2976 * and AMD-V or unrestricted guest mode.
2977 */
2978 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "3DNOWPRF", &pConfig->enm3dNowPrf, fNestedPagingAndFullGuestExec);
2979 AssertLogRelRCReturn(rc, rc);
2980
2981 /** @cfgm{/CPUM/IsaExts/AXMMX, isaextcfg, depends}
2982 * Whether to expose AMD's MMX extensions to the guest. For the time being
2983 * the default is to only do this for VMs with nested paging and AMD-V or
2984 * unrestricted guest mode.
2985 */
2986 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "AXMMX", &pConfig->enmAmdExtMmx, fNestedPagingAndFullGuestExec);
2987 AssertLogRelRCReturn(rc, rc);
2988
2989 return VINF_SUCCESS;
2990}
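/*
 * A minimal sketch of how one of the IsaExts values above could be
 * pre-seeded through the CFGM API before this reader runs.  Illustrative
 * only and not part of the call path; it assumes a valid pVM and uses the
 * generic CFGMR3InsertNode/CFGMR3InsertInteger calls:
 *
 *     PCFGMNODE pCpumNode    = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
 *     PCFGMNODE pIsaExtsNode = NULL;
 *     int rc2 = CFGMR3InsertNode(pCpumNode, "IsaExts", &pIsaExtsNode);
 *     if (RT_SUCCESS(rc2))
 *         rc2 = CFGMR3InsertInteger(pIsaExtsNode, "RDRAND", 0); // 0 = disabled
 */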
2991
2992
2993/**
2994 * Initializes the emulated CPU's CPUID & MSR information.
2995 *
2996 * @returns VBox status code.
2997 * @param pVM The cross context VM structure.
2998 * @param pHostMsrs Pointer to the host MSRs.
2999 */
3000int cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs)
3001{
3002 Assert(pHostMsrs);
3003
3004 PCPUM pCpum = &pVM->cpum.s;
3005 PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
3006
3007 /*
3008 * Set the fCpuIdApicFeatureVisible flags so the APIC can assume visibility
3009 * on construction and manage everything from here on.
3010 */
3011 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3012 {
3013 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3014 pVCpu->cpum.s.fCpuIdApicFeatureVisible = true;
3015 }
3016
3017 /*
3018 * Read the configuration.
3019 */
3020 CPUMCPUIDCONFIG Config;
3021 RT_ZERO(Config);
3022
3023 bool const fNestedPagingAndFullGuestExec = VM_IS_NEM_ENABLED(pVM)
3024 || HMAreNestedPagingAndFullGuestExecEnabled(pVM);
3025 int rc = cpumR3CpuIdReadConfig(pVM, &Config, pCpumCfg, fNestedPagingAndFullGuestExec);
3026 AssertRCReturn(rc, rc);
3027
3028 /*
3029 * Get the guest CPU data from the database and/or the host.
3030 *
3031 * The CPUID and MSRs are currently living on the regular heap to avoid
3032 * fragmenting the hyper heap (and because there isn't/wasn't any realloc
3033 * API for the hyper heap). This means special cleanup considerations.
3034 */
3035 /** @todo The hyper heap will be removed ASAP, so the final destination is
3036 * now fixed-size arrays in the VM structure. Maybe we can simplify
3037 * this allocation fun a little now? Or maybe it's too convenient for
3038 * the CPU reporter code... No time to figure that out now. */
3039 rc = cpumR3DbGetCpuInfo(Config.szCpuName, &pCpum->GuestInfo);
3040 if (RT_FAILURE(rc))
3041 return rc == VERR_CPUM_DB_CPU_NOT_FOUND
3042 ? VMSetError(pVM, rc, RT_SRC_POS,
3043 "Info on guest CPU '%s' could not be found. Please, select a different CPU.", Config.szCpuName)
3044 : rc;
3045
3046#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
3047 if (pCpum->GuestInfo.fMxCsrMask & ~pVM->cpum.s.fHostMxCsrMask)
3048 {
3049 LogRel(("Stripping unsupported MXCSR bits from guest mask: %#x -> %#x (host: %#x)\n", pCpum->GuestInfo.fMxCsrMask,
3050 pCpum->GuestInfo.fMxCsrMask & pVM->cpum.s.fHostMxCsrMask, pVM->cpum.s.fHostMxCsrMask));
3051 pCpum->GuestInfo.fMxCsrMask &= pVM->cpum.s.fHostMxCsrMask;
3052 }
3053 LogRel(("CPUM: MXCSR_MASK=%#x (host: %#x)\n", pCpum->GuestInfo.fMxCsrMask, pVM->cpum.s.fHostMxCsrMask));
3054#else
3055 LogRel(("CPUM: MXCSR_MASK=%#x\n", pCpum->GuestInfo.fMxCsrMask));
3056#endif
3057
3058 /** @cfgm{/CPUM/MSRs/[Name]/[First|Last|Type|Value|...],}
3059 * Overrides the guest MSRs.
3060 */
3061 rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs"));
3062
3063 /** @cfgm{/CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
3064 * Overrides the CPUID leaf values (from the host CPU usually) used for
3065 * calculating the guest CPUID leaves. This can be used to preserve the CPUID
3066 * values when moving a VM to a different machine. Another use is restricting
3067 * (or extending) the feature set exposed to the guest. */
3068 if (RT_SUCCESS(rc))
3069 rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID");
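 /*
  * A hedged sketch of a programmatic HostCPUID override matching the layout
  * documented above (leaf nodes named by hex leaf number, values by register
  * name); the concrete leaf and value here are purely illustrative:
  *
  *     PCFGMNODE pHostCpuId = CFGMR3GetChild(pCpumCfg, "HostCPUID"); // assumes the node exists
  *     PCFGMNODE pLeafNode  = NULL;
  *     if (pHostCpuId && RT_SUCCESS(CFGMR3InsertNode(pHostCpuId, "00000001", &pLeafNode)))
  *         CFGMR3InsertInteger(pLeafNode, "ecx", UINT32_C(0x00802201));
  */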
3070
3071 if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */
3072 rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
3073 "Found unsupported configuration node '/CPUM/CPUID/'. "
3074 "Please use IMachine::setCPUIDLeaf() instead.");
3075
3076 CPUMMSRS GuestMsrs;
3077 RT_ZERO(GuestMsrs);
3078
3079 /*
3080 * Pre-explode the CPUID info.
3081 */
3082 if (RT_SUCCESS(rc))
3083 rc = cpumCpuIdExplodeFeaturesX86(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs,
3084 &pCpum->GuestFeatures);
3085
3086 /*
3087 * Sanitize the cpuid information passed on to the guest.
3088 */
3089 if (RT_SUCCESS(rc))
3090 {
3091 rc = cpumR3CpuIdSanitize(pVM, pCpum, &Config);
3092 if (RT_SUCCESS(rc))
3093 {
3094 cpumR3CpuIdLimitLeaves(pCpum, &Config);
3095 cpumR3CpuIdLimitIntelFamModStep(pCpum, &Config);
3096 }
3097 }
3098
3099 /*
3100 * Set up MSRs introduced in microcode updates or that are otherwise not in
3101 * the CPU profile, but are advertised in the CPUID info we just sanitized.
3102 */
3103 if (RT_SUCCESS(rc))
3104 rc = cpumR3MsrReconcileWithCpuId(pVM);
3105 /*
3106 * MSR fudging.
3107 */
3108 if (RT_SUCCESS(rc))
3109 {
3110 /** @cfgm{/CPUM/FudgeMSRs, boolean, true}
3111 * Fudges some common MSRs if not present in the selected CPU database entry.
3112 * This is for trying to keep VMs running when moved between different hosts
3113 * and different CPU vendors. */
3114 bool fEnable;
3115 rc = CFGMR3QueryBoolDef(pCpumCfg, "FudgeMSRs", &fEnable, true); AssertRC(rc);
3116 if (RT_SUCCESS(rc) && fEnable)
3117 {
3118 rc = cpumR3MsrApplyFudge(pVM);
3119 AssertLogRelRC(rc);
3120 }
3121 }
3122 if (RT_SUCCESS(rc))
3123 {
3124 /*
3125 * Move the MSR and CPUID arrays over to the static VM structure allocations
3126 * and explode guest CPU features again.
3127 */
3128 void *pvFree = pCpum->GuestInfo.paCpuIdLeavesR3;
3129 rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCpum, pCpum->GuestInfo.paCpuIdLeavesR3,
3130 pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs);
3131 RTMemFree(pvFree);
3132
3133 AssertFatalMsg(pCpum->GuestInfo.cMsrRanges <= RT_ELEMENTS(pCpum->GuestInfo.aMsrRanges),
3134 ("%u\n", pCpum->GuestInfo.cMsrRanges));
3135 memcpy(pCpum->GuestInfo.aMsrRanges, pCpum->GuestInfo.paMsrRangesR3,
3136 sizeof(pCpum->GuestInfo.paMsrRangesR3[0]) * pCpum->GuestInfo.cMsrRanges);
3137 RTMemFree(pCpum->GuestInfo.paMsrRangesR3);
3138 pCpum->GuestInfo.paMsrRangesR3 = pCpum->GuestInfo.aMsrRanges;
3139
3140 AssertLogRelRCReturn(rc, rc);
3141
3142 /*
3143 * Some more configuration that we're applying at the end of everything
3144 * via the CPUMR3SetGuestCpuIdFeature API.
3145 */
3146
3147 /* Check if 64-bit guest support was enabled. */
3148 bool fEnable64bit;
3149 rc = CFGMR3QueryBoolDef(pCpumCfg, "Enable64bit", &fEnable64bit, false);
3150 AssertRCReturn(rc, rc);
3151 if (fEnable64bit)
3152 {
3153 /* In case of a CPU upgrade: */
3154 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
3155 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* (Long mode only on Intel CPUs.) */
3156 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
3157 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
3158 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
3159
3160 /* The actual feature: */
3161 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
3162 }
3163
3164 /* Check if PAE was explicitly enabled by the user. */
3165 bool fEnable;
3166 rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, fEnable64bit);
3167 AssertRCReturn(rc, rc);
3168 if (fEnable && !pVM->cpum.s.GuestFeatures.fPae)
3169 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
3170
3171 /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
3172 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, fEnable64bit);
3173 AssertRCReturn(rc, rc);
3174 if (fEnable && !pVM->cpum.s.GuestFeatures.fNoExecute)
3175 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
3176
3177 /* Check if speculation control is enabled. */
3178 rc = CFGMR3QueryBoolDef(pCpumCfg, "SpecCtrl", &fEnable, false);
3179 AssertRCReturn(rc, rc);
3180 if (fEnable)
3181 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SPEC_CTRL);
3182 else
3183 {
3184 /*
3185 * Set the "SSBD-not-needed" flag to work around a bug in some Linux kernels when the VIRT_SPEC_CTRL
3186 * feature is not exposed on AMD CPUs and there is only 1 vCPU configured.
3187 * This was observed with kernel "4.15.0-29-generic #31~16.04.1-Ubuntu" but more versions are likely affected.
3188 *
3189 * The kernel doesn't initialize a lock and causes a NULL pointer exception later on when configuring SSBD:
3190 * EIP: _raw_spin_lock+0x14/0x30
3191 * EFLAGS: 00010046 CPU: 0
3192 * EAX: 00000000 EBX: 00000001 ECX: 00000004 EDX: 00000000
3193 * ESI: 00000000 EDI: 00000000 EBP: ee023f1c ESP: ee023f18
3194 * DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
3195 * CR0: 80050033 CR2: 00000004 CR3: 3671c180 CR4: 000006f0
3196 * Call Trace:
3197 * speculative_store_bypass_update+0x8e/0x180
3198 * ssb_prctl_set+0xc0/0xe0
3199 * arch_seccomp_spec_mitigate+0x1d/0x20
3200 * do_seccomp+0x3cb/0x610
3201 * SyS_seccomp+0x16/0x20
3202 * do_fast_syscall_32+0x7f/0x1d0
3203 * entry_SYSENTER_32+0x4e/0x7c
3204 *
3205 * The lock would've been initialized in process.c:speculative_store_bypass_ht_init() called from two places in smpboot.c.
3206 * First when a secondary CPU is started and second in native_smp_prepare_cpus() which is not called in a single vCPU environment.
3207 *
3208 * As speculation control features are completely disabled anyway by the time we arrive here, there is no harm in telling the
3209 * guest not to even try.
3210 */
3211 if ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
3212 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
3213 {
3214 PCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x80000008), 0);
3215 if (pLeaf)
3216 {
3217 pLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_NO_SSBD_REQUIRED;
3218 LogRel(("CPUM: Set SSBD not required flag for AMD to work around some buggy Linux kernels!\n"));
3219 }
3220 }
3221 }
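 /*
  * From the guest side the flag set above is just a CPUID bit; a minimal
  * sketch of the check a kernel could make (assumes an AMD/Hygon profile
  * with leaf 0x80000008 present, using this file's ASMCpuIdExSlow helper):
  *
  *     uint32_t uEax, uEbx, uEcx, uEdx;
  *     ASMCpuIdExSlow(UINT32_C(0x80000008), 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
  *     if (uEbx & X86_CPUID_AMD_EFEID_EBX_NO_SSBD_REQUIRED)
  *         ; // no SSBD mitigation setup needed
  */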
3222
3223 /*
3224 * Finally, initialize guest VMX MSRs.
3225 *
3226 * This needs to be done -after- exploding guest features and sanitizing CPUID leaves
3227 * as constructing VMX capabilities MSRs rely on CPU feature bits like long mode,
3228 * unrestricted-guest execution, CR4 feature bits and possibly more in the future.
3229 */
3230 /** @todo r=bird: given that long mode never used to be enabled before the
3231 * VMINITCOMPLETED_RING0 state, and we're a lot earlier here in ring-3
3232 * init, the above comment cannot be entirely accurate. */
3233 if (pVM->cpum.s.GuestFeatures.fVmx)
3234 {
3235 Assert(Config.fNestedHWVirt);
3236 cpumR3InitVmxGuestFeaturesAndMsrs(pVM, pCpumCfg, &pHostMsrs->hwvirt.vmx, &GuestMsrs.hwvirt.vmx);
3237
3238 /* Copy MSRs to all VCPUs */
3239 PCVMXMSRS pVmxMsrs = &GuestMsrs.hwvirt.vmx;
3240 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3241 {
3242 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3243 memcpy(&pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs, pVmxMsrs, sizeof(*pVmxMsrs));
3244 }
3245 }
3246
3247 return VINF_SUCCESS;
3248 }
3249
3250 /*
3251 * Failed before switching to hyper heap.
3252 */
3253 RTMemFree(pCpum->GuestInfo.paCpuIdLeavesR3);
3254 pCpum->GuestInfo.paCpuIdLeavesR3 = NULL;
3255 RTMemFree(pCpum->GuestInfo.paMsrRangesR3);
3256 pCpum->GuestInfo.paMsrRangesR3 = NULL;
3257 return rc;
3258}
3259
3260
3261/**
3262 * Sets a CPUID feature bit during VM initialization.
3263 *
3264 * Since the CPUID feature bits are generally related to CPU features, other
3265 * CPUM configuration like MSRs can also be modified by calls to this API.
3266 *
3267 * @param pVM The cross context VM structure.
3268 * @param enmFeature The feature to set.
3269 */
3270VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
3271{
3272 PCPUMCPUIDLEAF pLeaf;
3273 PCPUMMSRRANGE pMsrRange;
3274
3275#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
3276# define CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) \
3277 if (!pVM->cpum.s.HostFeatures. a_fFeature) \
3278 { \
3279 LogRel(("CPUM: WARNING! Can't turn on " a_szFeature " when the host doesn't support it!\n")); \
3280 return; \
3281 } else do { } while (0)
3282#else
3283# define CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) do { } while (0)
3284#endif
3285
3286#define GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) \
3287 do \
3288 { \
3289 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001)); \
3290 if (!pLeaf) \
3291 { \
3292 LogRel(("CPUM: WARNING! Can't turn on " a_szFeature " when no 0x80000001 CPUID leaf!\n")); \
3293 return; \
3294 } \
3295 CHECK_X86_HOST_FEATURE_RET(a_fFeature,a_szFeature); \
3296 } while (0)
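/*
 * For reference, GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fNoExecute, "NX/XD")
 * expands to roughly the following on x86/amd64 hosts (a sketch, with the
 * log strings abbreviated):
 *
 *     pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
 *     if (!pLeaf)
 *     {
 *         LogRel(("CPUM: WARNING! ... no 0x80000001 CPUID leaf!\n"));
 *         return;
 *     }
 *     if (!pVM->cpum.s.HostFeatures.fNoExecute)
 *     {
 *         LogRel(("CPUM: WARNING! ... host doesn't support it!\n"));
 *         return;
 *     }
 */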
3297
3298 switch (enmFeature)
3299 {
3300 /*
3301 * Set the APIC bit in both feature masks.
3302 */
3303 case CPUMCPUIDFEATURE_APIC:
3304 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3305 if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
3306 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
3307
3308 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
3309 if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
3310 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
3311
3312 pVM->cpum.s.GuestFeatures.fApic = 1;
3313
3314 /* Make sure we've got the APICBASE MSR present. */
3315 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_APICBASE);
3316 if (!pMsrRange)
3317 {
3318 static CPUMMSRRANGE const s_ApicBase =
3319 {
3320 /*.uFirst =*/ MSR_IA32_APICBASE, /*.uLast =*/ MSR_IA32_APICBASE,
3321 /*.enmRdFn =*/ kCpumMsrRdFn_Ia32ApicBase, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32ApicBase,
3322 /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
3323 /*.szName = */ "IA32_APIC_BASE"
3324 };
3325 int rc = CPUMR3MsrRangesInsert(pVM, &s_ApicBase);
3326 AssertLogRelRC(rc);
3327 }
3328
3329 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled xAPIC\n"));
3330 break;
3331
3332 /*
3333 * Set the x2APIC bit in the standard feature mask.
3334 * Note! ASSUMES CPUMCPUIDFEATURE_APIC is called first.
3335 */
3336 case CPUMCPUIDFEATURE_X2APIC:
3337 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3338 if (pLeaf)
3339 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
3340 pVM->cpum.s.GuestFeatures.fX2Apic = 1;
3341
3342 /* Make sure the MSR doesn't GP or ignore the EXTD bit. */
3343 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_APICBASE);
3344 if (pMsrRange)
3345 {
3346 pMsrRange->fWrGpMask &= ~MSR_IA32_APICBASE_EXTD;
3347 pMsrRange->fWrIgnMask &= ~MSR_IA32_APICBASE_EXTD;
3348 }
3349
3350 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
3351 break;
3352
3353 /*
3354 * Set the sysenter/sysexit bit in the standard feature mask.
3355 * Assumes the caller knows what it's doing! (host must support these)
3356 */
3357 case CPUMCPUIDFEATURE_SEP:
3358 CHECK_X86_HOST_FEATURE_RET(fSysEnter, "SEP");
3359 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3360 if (pLeaf)
3361 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
3362 pVM->cpum.s.GuestFeatures.fSysEnter = 1;
3363 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
3364 break;
3365
3366 /*
3367 * Set the syscall/sysret bit in the extended feature mask.
3368 * Assumes the caller knows what it's doing! (host must support these)
3369 */
3370 case CPUMCPUIDFEATURE_SYSCALL:
3371 GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fSysCall, "SYSCALL/SYSRET");
3372
3373 /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
3374 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
3375 pVM->cpum.s.GuestFeatures.fSysCall = 1;
3376 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
3377 break;
3378
3379 /*
3380 * Set the PAE bit in both feature masks.
3381 * Assumes the caller knows what it's doing! (host must support these)
3382 */
3383 case CPUMCPUIDFEATURE_PAE:
3384 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3385 if (pLeaf)
3386 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
3387
3388 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
3389 if ( pLeaf
3390 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
3391 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
3392 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
3393
3394 pVM->cpum.s.GuestFeatures.fPae = 1;
3395 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
3396 break;
3397
3398 /*
3399 * Set the LONG MODE bit in the extended feature mask.
3400 * Assumes the caller knows what it's doing! (host must support these)
3401 */
3402 case CPUMCPUIDFEATURE_LONG_MODE:
3403 GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fLongMode, "LONG MODE");
3404
3405 /* Valid for both Intel and AMD. */
3406 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
3407 pVM->cpum.s.GuestFeatures.fLongMode = 1;
3408 pVM->cpum.s.GuestFeatures.cVmxMaxPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
3409 if (pVM->cpum.s.GuestFeatures.fVmx)
3410 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3411 {
3412 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3413 pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Basic &= ~VMX_BASIC_PHYSADDR_WIDTH_32BIT;
3414 }
3415 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
3416 break;
3417
3418 /*
3419 * Set the NX/XD bit in the extended feature mask.
3420 * Assumes the caller knows what it's doing! (host must support these)
3421 */
3422 case CPUMCPUIDFEATURE_NX:
3423 GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fNoExecute, "NX/XD");
3424
3425 /* Valid for both Intel and AMD. */
3426 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
3427 pVM->cpum.s.GuestFeatures.fNoExecute = 1;
3428 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
3429 break;
3430
3431
3432 /*
3433 * Set the LAHF/SAHF support in 64-bit mode.
3434 * Assumes the caller knows what it's doing! (host must support this)
3435 */
3436 case CPUMCPUIDFEATURE_LAHF:
3437 GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fLahfSahf, "LAHF/SAHF");
3438
3439 /* Valid for both Intel and AMD. */
3440 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
3441 pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
3442 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
3443 break;
3444
3445 /*
3446 * Set the RDTSCP support bit.
3447 * Assumes the caller knows what it's doing! (host must support this)
3448 */
3449 case CPUMCPUIDFEATURE_RDTSCP:
3450 if (pVM->cpum.s.u8PortableCpuIdLevel > 0)
3451 return;
3452 GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fRdTscP, "RDTSCP");
3453 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
3454
3455 /* Valid for both Intel and AMD. */
3456 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
3457 pVM->cpum.s.GuestFeatures.fRdTscP = 1;
3458 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
3459 break;
3460
3461 /*
3462 * Set the Hypervisor Present bit in the standard feature mask.
3463 */
3464 case CPUMCPUIDFEATURE_HVP:
3465 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3466 if (pLeaf)
3467 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
3468 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
3469 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
3470 break;
3471
3472 /*
3473 * Set up the speculation control CPUID bits and MSRs. This is quite complicated
3474 * on Intel CPUs, and different on AMDs.
3475 */
3476 case CPUMCPUIDFEATURE_SPEC_CTRL:
3477 if (pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
3478 {
3479 pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
3480 if ( !pLeaf
3481 || !(pVM->cpum.s.HostFeatures.fIbpb || pVM->cpum.s.HostFeatures.fIbrs))
3482 {
3483 LogRel(("CPUM: WARNING! Can't turn on Speculation Control when the host doesn't support it!\n"));
3484 return;
3485 }
3486
3487 /* The feature can be enabled. Let's see what we can actually do. */
3488 pVM->cpum.s.GuestFeatures.fSpeculationControl = 1;
3489
3490 /* We will only expose STIBP if IBRS is present to keep things simpler (simple is not an option). */
3491 if (pVM->cpum.s.HostFeatures.fIbrs)
3492 {
3493 pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB;
3494 pVM->cpum.s.GuestFeatures.fIbrs = 1;
3495 if (pVM->cpum.s.HostFeatures.fStibp)
3496 {
3497 pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_STIBP;
3498 pVM->cpum.s.GuestFeatures.fStibp = 1;
3499 }
3500
3501 /* Make sure we have the speculation control MSR... */
3502 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_SPEC_CTRL);
3503 if (!pMsrRange)
3504 {
3505 static CPUMMSRRANGE const s_SpecCtrl =
3506 {
3507 /*.uFirst =*/ MSR_IA32_SPEC_CTRL, /*.uLast =*/ MSR_IA32_SPEC_CTRL,
3508 /*.enmRdFn =*/ kCpumMsrRdFn_Ia32SpecCtrl, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32SpecCtrl,
3509 /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
3510 /*.szName = */ "IA32_SPEC_CTRL"
3511 };
3512 int rc = CPUMR3MsrRangesInsert(pVM, &s_SpecCtrl);
3513 AssertLogRelRC(rc);
3514 }
3515
3516 /* ... and the predictor command MSR. */
3517 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_PRED_CMD);
3518 if (!pMsrRange)
3519 {
3520 /** @todo incorrect fWrGpMask. */
3521 static CPUMMSRRANGE const s_SpecCtrl =
3522 {
3523 /*.uFirst =*/ MSR_IA32_PRED_CMD, /*.uLast =*/ MSR_IA32_PRED_CMD,
3524 /*.enmRdFn =*/ kCpumMsrRdFn_WriteOnly, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32PredCmd,
3525 /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
3526 /*.szName = */ "IA32_PRED_CMD"
3527 };
3528 int rc = CPUMR3MsrRangesInsert(pVM, &s_SpecCtrl);
3529 AssertLogRelRC(rc);
3530 }
3531
3532 }
3533
3534 if (pVM->cpum.s.HostFeatures.fArchCap)
3535 {
3536 /* Install the architectural capabilities MSR. */
3537 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_ARCH_CAPABILITIES);
3538 if (!pMsrRange)
3539 {
3540 static CPUMMSRRANGE const s_ArchCaps =
3541 {
3542 /*.uFirst =*/ MSR_IA32_ARCH_CAPABILITIES, /*.uLast =*/ MSR_IA32_ARCH_CAPABILITIES,
3543 /*.enmRdFn =*/ kCpumMsrRdFn_Ia32ArchCapabilities, /*.enmWrFn =*/ kCpumMsrWrFn_ReadOnly,
3544 /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ UINT64_MAX,
3545 /*.szName = */ "IA32_ARCH_CAPABILITIES"
3546 };
3547 int rc = CPUMR3MsrRangesInsert(pVM, &s_ArchCaps);
3548 AssertLogRelRC(rc);
3549 }
3550
3551 /* Advertise IBRS_ALL if present at this point... */
3552 if (pVM->cpum.s.HostFeatures.fArchCap & MSR_IA32_ARCH_CAP_F_IBRS_ALL)
3553 VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps |= MSR_IA32_ARCH_CAP_F_IBRS_ALL);
3554 }
3555
3556 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Speculation Control.\n"));
3557 }
3558 else if ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
3559 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
3560 {
3561 /* The precise details of AMD's implementation are not yet clear. */
3562 }
3563 break;
3564
3565 default:
3566 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
3567 break;
3568 }
3569
3570 /** @todo can probably kill this as this API is now init time only... */
3571 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3572 {
3573 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3574 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
3575 }
3576
3577#undef GET_8000_0001_CHECK_X86_HOST_FEATURE_RET
3578#undef CHECK_X86_HOST_FEATURE_RET
3579}
3580
3581
3582/**
3583 * Queries a CPUID feature bit.
3584 *
3585 * @returns true if the feature is present, false if not.
3586 * @param pVM The cross context VM structure.
3587 * @param enmFeature The feature to query.
3588 * @deprecated Use the cpum.ro.GuestFeatures directly instead.
3589 */
3590VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
3591{
3592 switch (enmFeature)
3593 {
3594 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
3595 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
3596 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
3597 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
3598 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
3599 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
3600 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
3601 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
3602 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
3603 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
3604 case CPUMCPUIDFEATURE_SPEC_CTRL: return pVM->cpum.s.GuestFeatures.fSpeculationControl;
3605 case CPUMCPUIDFEATURE_INVALID:
3606 case CPUMCPUIDFEATURE_32BIT_HACK:
3607 break;
3608 }
3609 AssertFailed();
3610 return false;
3611}
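/*
 * Typical usage, plus the preferred direct replacement per the deprecation
 * note above (a sketch):
 *
 *     bool fNx       = CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
 *     bool fNxDirect = pVM->cpum.ro.GuestFeatures.fNoExecute;   // preferred
 */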
3612
3613
3614/**
3615 * Clears a CPUID feature bit.
3616 *
3617 * @param pVM The cross context VM structure.
3618 * @param enmFeature The feature to clear.
3619 *
3620 * @deprecated Probably better to default the feature to disabled and only allow
3621 * setting (enabling) it during construction.
3622 */
3623VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
3624{
3625 PCPUMCPUIDLEAF pLeaf;
3626 switch (enmFeature)
3627 {
3628 case CPUMCPUIDFEATURE_APIC:
3629 Assert(!pVM->cpum.s.GuestFeatures.fApic); /* We only expect this call during init. No MSR adjusting needed. */
3630 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3631 if (pLeaf)
3632 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
3633
3634 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
3635 if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
3636 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
3637
3638 pVM->cpum.s.GuestFeatures.fApic = 0;
3639 Log(("CPUM: ClearGuestCpuIdFeature: Disabled xAPIC\n"));
3640 break;
3641
3642 case CPUMCPUIDFEATURE_X2APIC:
3643 Assert(!pVM->cpum.s.GuestFeatures.fX2Apic); /* We only expect this call during init. No MSR adjusting needed. */
3644 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3645 if (pLeaf)
3646 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
3647 pVM->cpum.s.GuestFeatures.fX2Apic = 0;
3648 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
3649 break;
3650
3651#if 0
3652 case CPUMCPUIDFEATURE_PAE:
3653 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3654 if (pLeaf)
3655 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
3656
3657 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
3658 if ( pLeaf
3659 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
3660 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
3661 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
3662
3663 pVM->cpum.s.GuestFeatures.fPae = 0;
3664 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
3665 break;
3666
3667 case CPUMCPUIDFEATURE_LONG_MODE:
3668 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
3669 if (pLeaf)
3670 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
3671 pVM->cpum.s.GuestFeatures.fLongMode = 0;
3672 pVM->cpum.s.GuestFeatures.cVmxMaxPhysAddrWidth = 32;
3673 if (pVM->cpum.s.GuestFeatures.fVmx)
3674 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3675 {
3676 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3677 pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Basic |= VMX_BASIC_PHYSADDR_WIDTH_32BIT;
3678 }
3679 break;
3680
3681 case CPUMCPUIDFEATURE_LAHF:
3682 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
3683 if (pLeaf)
3684 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
3685 pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
3686 break;
3687#endif
3688 case CPUMCPUIDFEATURE_RDTSCP:
3689 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
3690 if (pLeaf)
3691 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
3692 pVM->cpum.s.GuestFeatures.fRdTscP = 0;
3693 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
3694 break;
3695
3696#if 0
3697 case CPUMCPUIDFEATURE_HVP:
3698 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3699 if (pLeaf)
3700 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
3701 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
3702 break;
3703
3704 case CPUMCPUIDFEATURE_SPEC_CTRL:
3705 pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
3706 if (pLeaf)
3707 pLeaf->uEdx &= ~(X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB | X86_CPUID_STEXT_FEATURE_EDX_STIBP);
3708 VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps &= ~MSR_IA32_ARCH_CAP_F_IBRS_ALL);
3709 Log(("CPUM: ClearGuestCpuIdFeature: Disabled speculation control!\n"));
3710 break;
3711#endif
3712 default:
3713 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
3714 break;
3715 }
3716
3717 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3718 {
3719 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3720 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
3721 }
3722}
3723
3724
3725/**
3726 * Do some final polishing after all calls to CPUMR3SetGuestCpuIdFeature and
3727 * CPUMR3ClearGuestCpuIdFeature are (probably) done.
3728 *
3729 * @param pVM The cross context VM structure.
3730 */
3731void cpumR3CpuIdRing3InitDone(PVM pVM)
3732{
3733 /*
3734 * Do not advertise NX w/o PAE; it seems to confuse Windows 7 (black screen very
3735 * early in real mode).
3736 */
3737 PCPUMCPUIDLEAF pStdLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3738 PCPUMCPUIDLEAF pExtLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
3739 if (pStdLeaf && pExtLeaf)
3740 {
3741 if ( !(pStdLeaf->uEdx & X86_CPUID_FEATURE_EDX_PAE)
3742 && (pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_NX))
3743 pExtLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_NX;
3744 }
3745}
3746
3747
3748/*
3749 *
3750 *
3751 * Saved state related code.
3752 * Saved state related code.
3753 * Saved state related code.
3754 *
3755 *
3756 */
3757
3758/**
3759 * Called both in pass 0 and the final pass.
3760 *
3761 * @param pVM The cross context VM structure.
3762 * @param pSSM The saved state handle.
3763 */
3764void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM)
3765{
3766 /*
3767 * Save all the CPU ID leaves.
3768 */
3769 SSMR3PutU32(pSSM, sizeof(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3[0]));
3770 SSMR3PutU32(pSSM, pVM->cpum.s.GuestInfo.cCpuIdLeaves);
3771 SSMR3PutMem(pSSM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3,
3772 sizeof(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3[0]) * pVM->cpum.s.GuestInfo.cCpuIdLeaves);
3773
3774 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestInfo.DefCpuId, sizeof(pVM->cpum.s.GuestInfo.DefCpuId));
3775
3776 /*
3777 * Save a good portion of the raw CPU IDs as well, as they may come in
3778 * handy when validating features for raw mode.
3779 */
3780#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
3781 CPUMCPUID aRawStd[16];
3782 for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
3783 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
3784 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
3785 SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
3786
3787 CPUMCPUID aRawExt[32];
3788 for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
3789 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
3790 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
3791 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
3792
3793#else
3794 /* Two zero counts on non-x86 hosts. */
3795 SSMR3PutU32(pSSM, 0);
3796 SSMR3PutU32(pSSM, 0);
3797#endif
3798}
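/*
 * The record written above, as a layout sketch (the raw arrays and their
 * non-zero counts are only present when saved on an x86/amd64 host):
 *
 *     uint32_t      cbLeaf;               // sizeof(CPUMCPUIDLEAF)
 *     uint32_t      cLeaves;
 *     CPUMCPUIDLEAF aLeaves[cLeaves];
 *     CPUMCPUID     DefCpuId;
 *     uint32_t      cRawStd;              // 16, or 0 on non-x86 hosts
 *     CPUMCPUID     aRawStd[cRawStd];
 *     uint32_t      cRawExt;              // 32, or 0 on non-x86 hosts
 *     CPUMCPUID     aRawExt[cRawExt];
 */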
3799
3800
3801static int cpumR3LoadOneOldGuestCpuIdArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
3802{
3803 uint32_t cCpuIds;
3804 int rc = SSMR3GetU32(pSSM, &cCpuIds);
3805 if (RT_SUCCESS(rc))
3806 {
3807 if (cCpuIds < 64)
3808 {
3809 for (uint32_t i = 0; i < cCpuIds; i++)
3810 {
3811 CPUMCPUID CpuId;
3812 rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId));
3813 if (RT_FAILURE(rc))
3814 break;
3815
3816 CPUMCPUIDLEAF NewLeaf;
3817 NewLeaf.uLeaf = uBase + i;
3818 NewLeaf.uSubLeaf = 0;
3819 NewLeaf.fSubLeafMask = 0;
3820 NewLeaf.uEax = CpuId.uEax;
3821 NewLeaf.uEbx = CpuId.uEbx;
3822 NewLeaf.uEcx = CpuId.uEcx;
3823 NewLeaf.uEdx = CpuId.uEdx;
3824 NewLeaf.fFlags = 0;
3825 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &NewLeaf);
3826 }
3827 }
3828 else
3829 rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3830 }
3831 if (RT_FAILURE(rc))
3832 {
3833 RTMemFree(*ppaLeaves);
3834 *ppaLeaves = NULL;
3835 *pcLeaves = 0;
3836 }
3837 return rc;
3838}
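/*
 * The old-format record parsed above, as a sketch:
 *
 *     uint32_t  cCpuIds;            // must be < 64
 *     CPUMCPUID aCpuIds[cCpuIds];   // eax/ebx/ecx/edx for leaf uBase + i
 */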
3839
3840
3841static int cpumR3LoadGuestCpuIdArray(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
3842{
3843 *ppaLeaves = NULL;
3844 *pcLeaves = 0;
3845
3846 int rc;
3847 if (uVersion > CPUM_SAVED_STATE_VERSION_PUT_STRUCT)
3848 {
3849 /*
3850 * The new format. Starts by declaring the leaf size and count.
3851 */
3852 uint32_t cbLeaf;
3853 SSMR3GetU32(pSSM, &cbLeaf);
3854 uint32_t cLeaves;
3855 rc = SSMR3GetU32(pSSM, &cLeaves);
3856 if (RT_SUCCESS(rc))
3857 {
3858 if (cbLeaf == sizeof(**ppaLeaves))
3859 {
3860 if (cLeaves <= CPUM_CPUID_MAX_LEAVES)
3861 {
3862 /*
3863 * Load the leaves one by one.
3864 *
3865 * The uPrev stuff is a kludge for working around a week's worth of bad saved
3866 * states during the CPUID revamp in March 2015. We saved too many leaves
3867 * due to a bug in cpumR3CpuIdInstallAndExplodeLeaves, thus ending up with
3868 * garbage entries at the end of the array when restoring. We also had
3869 * a subleaf insertion bug that triggered with the leaf 4 stuff below,
3870 * this kludge doesn't deal correctly with that, but who cares...
3871 */
3872 uint32_t uPrev = 0;
3873 for (uint32_t i = 0; i < cLeaves && RT_SUCCESS(rc); i++)
3874 {
3875 CPUMCPUIDLEAF Leaf;
3876 rc = SSMR3GetMem(pSSM, &Leaf, sizeof(Leaf));
3877 if (RT_SUCCESS(rc))
3878 {
3879 if ( uVersion != CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT
3880 || Leaf.uLeaf >= uPrev)
3881 {
3882 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
3883 uPrev = Leaf.uLeaf;
3884 }
3885 else
3886 uPrev = UINT32_MAX;
3887 }
3888 }
3889 }
3890 else
3891 rc = SSMR3SetLoadError(pSSM, VERR_TOO_MANY_CPUID_LEAVES, RT_SRC_POS,
3892 "Too many CPUID leaves: %#x, max %#x", cLeaves, CPUM_CPUID_MAX_LEAVES);
3893 }
3894 else
3895 rc = SSMR3SetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
3896 "CPUMCPUIDLEAF size differs: saved=%#x, our=%#x", cbLeaf, sizeof(**ppaLeaves));
3897 }
3898 }
3899 else
3900 {
3901 /*
3902 * The old format with its three inflexible arrays.
3903 */
3904 rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves);
3905 if (RT_SUCCESS(rc))
3906 rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves);
3907 if (RT_SUCCESS(rc))
3908 rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves);
3909 if (RT_SUCCESS(rc))
3910 {
3911 /*
3912 * Fake up leaf 4 on Intel like we used to do in CPUMGetGuestCpuId earlier.
3913 */
3914 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafInt(*ppaLeaves, *pcLeaves, 0, 0);
3915 if ( pLeaf
3916 && RTX86IsIntelCpu(pLeaf->uEbx, pLeaf->uEcx, pLeaf->uEdx))
3917 {
3918 CPUMCPUIDLEAF Leaf;
3919 Leaf.uLeaf = 4;
3920 Leaf.fSubLeafMask = UINT32_MAX;
3921 Leaf.uSubLeaf = 0;
3922 Leaf.uEdx = UINT32_C(0); /* 3 flags, 0 is fine. */
3923 Leaf.uEcx = UINT32_C(63); /* sets - 1 */
3924 Leaf.uEbx = (UINT32_C(7) << 22) /* associativity -1 */
3925 | (UINT32_C(0) << 12) /* phys line partitions - 1 */
3926 | UINT32_C(63); /* system coherency line size - 1 */
3927 Leaf.uEax = (RT_MIN(pVM->cCpus - 1, UINT32_C(0x3f)) << 26) /* cores per package - 1 */
3928 | (UINT32_C(0) << 14) /* threads per cache - 1 */
3929 | (UINT32_C(1) << 5) /* cache level */
3930 | UINT32_C(1); /* cache type (data) */
3931 Leaf.fFlags = 0;
3932 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
3933 if (RT_SUCCESS(rc))
3934 {
3935 Leaf.uSubLeaf = 1; /* Should've been cache type 2 (code), but buggy code made it data. */
3936 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
3937 }
3938 if (RT_SUCCESS(rc))
3939 {
3940 Leaf.uSubLeaf = 2; /* Should've been cache type 3 (unified), but buggy code made it data. */
3941 Leaf.uEcx = 4095; /* sets - 1 */
3942 Leaf.uEbx &= UINT32_C(0x003fffff); /* associativity - 1 */
3943 Leaf.uEbx |= UINT32_C(23) << 22;
3944 Leaf.uEax &= UINT32_C(0xfc003fff); /* threads per cache - 1 */
3945 Leaf.uEax |= RT_MIN(pVM->cCpus - 1, UINT32_C(0xfff)) << 14;
3946 Leaf.uEax &= UINT32_C(0xffffff1f); /* level */
3947 Leaf.uEax |= UINT32_C(2) << 5;
3948 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
3949 }
3950 }
3951 }
3952 }
3953 return rc;
3954}
3955
3956
3957/**
3958 * Loads the CPU ID leaves saved by pass 0, inner worker.
3959 *
3960 * @returns VBox status code.
3961 * @param pVM The cross context VM structure.
3962 * @param pSSM The saved state handle.
3963 * @param uVersion The format version.
3964 * @param paLeaves Guest CPUID leaves loaded from the state.
3965 * @param cLeaves The number of leaves in @a paLeaves.
3966 * @param pMsrs The guest MSRs.
3967 */
3968static int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
3969{
3970 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
3971#if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
3972 AssertMsgFailed(("Port me!"));
3973#endif
3974
3975 /*
3976 * Continue loading the state into stack buffers.
3977 */
3978 CPUMCPUID GuestDefCpuId;
3979 int rc = SSMR3GetMem(pSSM, &GuestDefCpuId, sizeof(GuestDefCpuId));
3980 AssertRCReturn(rc, rc);
3981
3982 CPUMCPUID aRawStd[16];
3983 uint32_t cRawStd;
3984 rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc);
3985 if (cRawStd > RT_ELEMENTS(aRawStd))
3986 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3987 rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
3988 AssertRCReturn(rc, rc);
3989 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
3990#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
3991 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
3992#else
3993 RT_ZERO(aRawStd[i]);
3994#endif
3995
3996 CPUMCPUID aRawExt[32];
3997 uint32_t cRawExt;
3998 rc = SSMR3GetU32(pSSM, &cRawExt); AssertRCReturn(rc, rc);
3999 if (cRawExt > RT_ELEMENTS(aRawExt))
4000 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
4001 rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
4002 AssertRCReturn(rc, rc);
4003 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
4004#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
4005 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
4006#else
4007 RT_ZERO(aRawExt[i]);
4008#endif
4009
4010 /*
4011 * Get the raw CPU IDs for the current host.
4012 */
4013 CPUMCPUID aHostRawStd[16];
4014#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
4015 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
4016 ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].uEax, &aHostRawStd[i].uEbx, &aHostRawStd[i].uEcx, &aHostRawStd[i].uEdx);
4017#else
4018 RT_ZERO(aHostRawStd);
4019#endif
4020
4021 CPUMCPUID aHostRawExt[32];
4022#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
4023 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
4024 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0,
4025 &aHostRawExt[i].uEax, &aHostRawExt[i].uEbx, &aHostRawExt[i].uEcx, &aHostRawExt[i].uEdx);
4026#else
4027 RT_ZERO(aHostRawExt);
4028#endif
4029
4030 /*
4031 * Get the host and guest overrides so we don't reject the state because
4032 * some feature was enabled through these interfaces.
4033 * Note! We currently only need the feature leaves, so skip the rest.
4034 */
4035 PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
4036 CPUMCPUID aHostOverrideStd[2];
4037 memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
4038 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aHostOverrideStd[0], RT_ELEMENTS(aHostOverrideStd), pOverrideCfg);
4039
4040 CPUMCPUID aHostOverrideExt[2];
4041 memcpy(&aHostOverrideExt[0], &aHostRawExt[0], sizeof(aHostOverrideExt));
4042 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aHostOverrideExt[0], RT_ELEMENTS(aHostOverrideExt), pOverrideCfg);
4043
4044 /*
4045 * This step can be skipped (StrictCpuIdChecks defaults to true).
4046 */
4047 bool fStrictCpuIdChecks;
4048 CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"), "StrictCpuIdChecks", &fStrictCpuIdChecks, true);
4049
4050 /*
4051 * Define a bunch of macros for simplifying the sanitizing/checking code below.
4052 */
4053 /* Generic expression + failure message. */
4054#define CPUID_CHECK_RET(expr, fmt) \
4055 do { \
4056 if (!(expr)) \
4057 { \
4058 char *pszMsg = RTStrAPrintf2 fmt; /* lack of variadic macros sucks */ \
4059 if (fStrictCpuIdChecks) \
4060 { \
4061 int rcCpuid = SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, "%s", pszMsg); \
4062 RTStrFree(pszMsg); \
4063 return rcCpuid; \
4064 } \
4065 LogRel(("CPUM: %s\n", pszMsg)); \
4066 RTStrFree(pszMsg); \
4067 } \
4068 } while (0)
4069#define CPUID_CHECK_WRN(expr, fmt) \
4070 do { \
4071 if (!(expr)) \
4072 LogRel(fmt); \
4073 } while (0)
4074
4075 /* For comparing two values and complaining if they differ. */
4076#define CPUID_CHECK2_RET(what, host, saved) \
4077 do { \
4078 if ((host) != (saved)) \
4079 { \
4080 if (fStrictCpuIdChecks) \
4081 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
4082 N_(#what " mismatch: host=%#x saved=%#x"), (host), (saved)); \
4083 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
4084 } \
4085 } while (0)
4086#define CPUID_CHECK2_WRN(what, host, saved) \
4087 do { \
4088 if ((host) != (saved)) \
4089 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
4090 } while (0)
4091
4092 /* For checking raw cpu features (raw mode). */
4093#define CPUID_RAW_FEATURE_RET(set, reg, bit) \
4094 do { \
4095 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
4096 { \
4097 if (fStrictCpuIdChecks) \
4098 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
4099 N_(#bit " mismatch: host=%d saved=%d"), \
4100 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) ); \
4101 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
4102 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
4103 } \
4104 } while (0)
4105#define CPUID_RAW_FEATURE_WRN(set, reg, bit) \
4106 do { \
4107 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
4108 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
4109 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
4110 } while (0)
4111#define CPUID_RAW_FEATURE_IGN(set, reg, bit) do { } while (0)
4112
4113 /* For checking guest features. */
4114#define CPUID_GST_FEATURE_RET(set, reg, bit) \
4115 do { \
4116 if ( (aGuestCpuId##set [1].reg & bit) \
4117 && !(aHostRaw##set [1].reg & bit) \
4118 && !(aHostOverride##set [1].reg & bit) \
4119 ) \
4120 { \
4121 if (fStrictCpuIdChecks) \
4122 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
4123 N_(#bit " is not supported by the host but has already been exposed to the guest")); \
4124 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
4125 } \
4126 } while (0)
4127#define CPUID_GST_FEATURE_WRN(set, reg, bit) \
4128 do { \
4129 if ( (aGuestCpuId##set [1].reg & bit) \
4130 && !(aHostRaw##set [1].reg & bit) \
4131 && !(aHostOverride##set [1].reg & bit) \
4132 ) \
4133 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
4134 } while (0)
4135#define CPUID_GST_FEATURE_EMU(set, reg, bit) \
4136 do { \
4137 if ( (aGuestCpuId##set [1].reg & bit) \
4138 && !(aHostRaw##set [1].reg & bit) \
4139 && !(aHostOverride##set [1].reg & bit) \
4140 ) \
4141 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
4142 } while (0)
4143#define CPUID_GST_FEATURE_IGN(set, reg, bit) do { } while (0)
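/* As a concrete sketch, CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE3)
 * boils down to:
 *
 *     if (   (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_SSE3)
 *         && !(aHostRawStd[1].uEcx & X86_CPUID_FEATURE_ECX_SSE3)
 *         && !(aHostOverrideStd[1].uEcx & X86_CPUID_FEATURE_ECX_SSE3))
 *         // fail the load when fStrictCpuIdChecks, otherwise just LogRel a warning
 */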
4144
4145 /* For checking guest features if AMD guest CPU. */
4146#define CPUID_GST_AMD_FEATURE_RET(set, reg, bit) \
4147 do { \
4148 if ( (aGuestCpuId##set [1].reg & bit) \
4149 && fGuestAmd \
4150 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
4151 && !(aHostOverride##set [1].reg & bit) \
4152 ) \
4153 { \
4154 if (fStrictCpuIdChecks) \
4155 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
4156 N_(#bit " is not supported by the host but has already been exposed to the guest")); \
4157 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
4158 } \
4159 } while (0)
4160#define CPUID_GST_AMD_FEATURE_WRN(set, reg, bit) \
4161 do { \
4162 if ( (aGuestCpuId##set [1].reg & bit) \
4163 && fGuestAmd \
4164 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
4165 && !(aHostOverride##set [1].reg & bit) \
4166 ) \
4167 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
4168 } while (0)
4169#define CPUID_GST_AMD_FEATURE_EMU(set, reg, bit) \
4170 do { \
4171 if ( (aGuestCpuId##set [1].reg & bit) \
4172 && fGuestAmd \
4173 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
4174 && !(aHostOverride##set [1].reg & bit) \
4175 ) \
4176 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
4177 } while (0)
4178#define CPUID_GST_AMD_FEATURE_IGN(set, reg, bit) do { } while (0)
4179
4180 /* For checking AMD features which have a corresponding bit in the standard
4181 range. (Intel defines very few bits in the extended feature sets.) */
4182#define CPUID_GST_FEATURE2_RET(reg, ExtBit, StdBit) \
4183 do { \
4184 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
4185 && !(fHostAmd \
4186 ? aHostRawExt[1].reg & (ExtBit) \
4187 : aHostRawStd[1].reg & (StdBit)) \
4188 && !(aHostOverrideExt[1].reg & (ExtBit)) \
4189 ) \
4190 { \
4191 if (fStrictCpuIdChecks) \
4192 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
4193 N_(#ExtBit " is not supported by the host but has already been exposed to the guest")); \
4194 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already been exposed to the guest\n")); \
4195 } \
4196 } while (0)
4197#define CPUID_GST_FEATURE2_WRN(reg, ExtBit, StdBit) \
4198 do { \
4199 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
4200 && !(fHostAmd \
4201 ? aHostRawExt[1].reg & (ExtBit) \
4202 : aHostRawStd[1].reg & (StdBit)) \
4203 && !(aHostOverrideExt[1].reg & (ExtBit)) \
4204 ) \
4205 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already been exposed to the guest\n")); \
4206 } while (0)
4207#define CPUID_GST_FEATURE2_EMU(reg, ExtBit, StdBit) \
4208 do { \
4209 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
4210 && !(fHostAmd \
4211 ? aHostRawExt[1].reg & (ExtBit) \
4212 : aHostRawStd[1].reg & (StdBit)) \
4213 && !(aHostOverrideExt[1].reg & (ExtBit)) \
4214 ) \
4215 LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
4216 } while (0)
4217#define CPUID_GST_FEATURE2_IGN(reg, ExtBit, StdBit) do { } while (0)
4218
4219
4220 /*
4221 * Verify that we can support the features already exposed to the guest on
4222 * this host.
4223 *
4224 * Most of the features we're emulating require intercepting instructions
4225 * and doing it the slow way, so there is no need to warn when they aren't
4226 * present in the host CPU. Thus we use IGN instead of EMU on these.
4227 *
4228 * Trailing comments:
4229 * "EMU" - Possible to emulate, could be lots of work and very slow.
4230 * "EMU?" - Can this be emulated?
4231 */
4232 CPUMCPUID aGuestCpuIdStd[2];
4233 RT_ZERO(aGuestCpuIdStd);
4234 cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]);
4235
4236 /* CPUID(1).ecx */
4237 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU
4238 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCLMUL); // -> EMU?
4239 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DTES64); // -> EMU?
4240 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_MONITOR);
4241 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CPLDS); // -> EMU?
4242 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_VMX); // -> EMU
4243 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SMX); // -> EMU
4244 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_EST); // -> EMU
4245 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TM2); // -> EMU?
4246 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSSE3); // -> EMU
4247 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CNTXID); // -> EMU
4248 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_SDBG);
4249 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_FMA); // -> EMU? what's this?
4250 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CX16); // -> EMU?
4251 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
4252 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PDCM); // -> EMU
4253 CPUID_GST_FEATURE_RET(Std, uEcx, RT_BIT_32(16) /*reserved*/);
4254 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCID);
4255 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DCA); // -> EMU?
4256 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_1); // -> EMU
4257 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_2); // -> EMU
4258 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_X2APIC);
4259 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MOVBE); // -> EMU
4260 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_POPCNT); // -> EMU
4261 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TSCDEADL);
4262 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES); // -> EMU
4263 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE); // -> EMU
4264 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE);
4265 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX); // -> EMU?
4266 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C);
4267 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_RDRAND);
4268 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_HVP); // Normally not set by host
4269
4270 /* CPUID(1).edx */
4271 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FPU);
4272 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_VME);
4273 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DE); // -> EMU?
4274 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE);
4275 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
4276 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
4277 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PAE);
4278 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCE);
4279 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
4280 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_APIC);
4281 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(10) /*reserved*/);
4282 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SEP);
4283 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MTRR);
4284 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PGE);
4285 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCA);
4286 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
4287 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAT);
4288 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE36);
4289 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSN);
4290 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CLFSH); // -> EMU
4291 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(20) /*reserved*/);
4292 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DS); // -> EMU?
4293 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_ACPI); // -> EMU?
4294 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
4295 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
4296 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE); // -> EMU
4297 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE2); // -> EMU
4298 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SS); // -> EMU?
4299 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_HTT); // -> EMU?
4300 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TM); // -> EMU?
4301 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(30) /*JMPE/IA64*/); // -> EMU
4302 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PBE); // -> EMU?
4303
4304 /* CPUID(0x80000000). */
4305 CPUMCPUID aGuestCpuIdExt[2];
4306 RT_ZERO(aGuestCpuIdExt);
4307 if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1]))
4308 {
4309 /** @todo deal with no 0x80000001 on the host. */
4310 bool const fHostAmd = RTX86IsAmdCpu(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx)
4311 || RTX86IsHygonCpu(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx);
4312 bool const fGuestAmd = RTX86IsAmdCpu(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx)
4313 || RTX86IsHygonCpu(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx);
4314
4315 /* CPUID(0x80000001).ecx */
4316 CPUID_GST_FEATURE_WRN(Ext, uEcx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); // -> EMU
4317 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CMPL); // -> EMU
4318 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SVM); // -> EMU
4319 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???
4320 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CR8L); // -> EMU
4321 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_ABM); // -> EMU
4322 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); // -> EMU
4323 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
4324 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU
4325 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_OSVW); // -> EMU?
4326 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_IBS); // -> EMU
4327 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_XOP); // -> EMU
4328 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); // -> EMU
4329 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_WDT); // -> EMU
4330 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(14));
4331 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(15));
4332 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(16));
4333 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(17));
4334 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(18));
4335 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(19));
4336 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(20));
4337 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(21));
4338 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(22));
4339 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(23));
4340 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(24));
4341 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(25));
4342 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(26));
4343 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(27));
4344 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(28));
4345 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(29));
4346 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(30));
4347 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(31));
4348
4349 /* CPUID(0x80000001).edx */
4350 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_FPU, X86_CPUID_FEATURE_EDX_FPU); // -> EMU
4351 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_VME, X86_CPUID_FEATURE_EDX_VME); // -> EMU
4352 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_DE, X86_CPUID_FEATURE_EDX_DE); // -> EMU
4353 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE, X86_CPUID_FEATURE_EDX_PSE);
4354 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_TSC, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
4355 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_MSR, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
4356 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_PAE, X86_CPUID_FEATURE_EDX_PAE);
4357 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MCE, X86_CPUID_FEATURE_EDX_MCE);
4358 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_CX8, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
4359 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_APIC, X86_CPUID_FEATURE_EDX_APIC);
4360 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(10) /*reserved*/);
4361 CPUID_GST_FEATURE_IGN( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL); // On Intel: long mode only.
4362 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MTRR, X86_CPUID_FEATURE_EDX_MTRR);
4363 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PGE, X86_CPUID_FEATURE_EDX_PGE);
4364 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MCA, X86_CPUID_FEATURE_EDX_MCA);
4365 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_CMOV, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
4366 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PAT, X86_CPUID_FEATURE_EDX_PAT);
4367 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
4368 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(18) /*reserved*/);
4369 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(19) /*reserved*/);
4370 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_NX);
4371 CPUID_GST_FEATURE_WRN( Ext, uEdx, RT_BIT_32(21) /*reserved*/);
4372 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
4373 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_MMX, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
4374 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_FXSR, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
4375 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
4376 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
4377 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
4378 CPUID_GST_FEATURE_IGN( Ext, uEdx, RT_BIT_32(28) /*reserved*/);
4379 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
4380 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
4381 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
4382 }
4383
4384 /** @todo check leaf 7 */
4385
4386 /* CPUID(d) - XCR0 stuff - takes ECX as input.
4387 * ECX=0: EAX - Valid bits in XCR0[31:0].
4388 * EBX - Maximum state size as per current XCR0 value.
4389 * ECX - Maximum state size for all supported features.
4390 * EDX - Valid bits in XCR0[63:32].
4391 * ECX=1: EAX - Various X-features.
4392 * EBX - Maximum state size as per current XCR0|IA32_XSS value.
4393 * ECX - Valid bits in IA32_XSS[31:0].
4394 * EDX - Valid bits in IA32_XSS[63:32].
4395 * ECX=N, where N is in 2..63 and indicates a bit in XCR0 and/or IA32_XSS;
4396 * if the bit is invalid, all four registers are set to zero.
4397 * EAX - The state size for this feature.
4398 * EBX - The state byte offset of this feature.
4399 * ECX - Bit 0 indicates whether this sub-leaf maps to a valid IA32_XSS bit (=1) or a valid XCR0 bit (=0).
4400 * EDX - Reserved, but is set to zero if the sub-leaf index is invalid.
4401 */
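    /* Illustrative sketch (not compiled; it simply mirrors what the code below
     * does with the saved leaf): per the layout above, the valid XCR0 mask is
     * the EDX:EAX pair of sub-leaf 0:
     *
     *     uint32_t uEax, uEbx, uEcx, uEdx;
     *     ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
     *     uint64_t const fXcr0Mask = RT_MAKE_U64(uEax, uEdx); // EAX = bits 31:0, EDX = bits 63:32
     */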
4402 uint64_t fGuestXcr0Mask = 0;
4403 PCPUMCPUIDLEAF pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), 0);
4404 if ( pCurLeaf
4405 && (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_XSAVE)
4406 && ( pCurLeaf->uEax
4407 || pCurLeaf->uEbx
4408 || pCurLeaf->uEcx
4409 || pCurLeaf->uEdx) )
4410 {
4411 fGuestXcr0Mask = RT_MAKE_U64(pCurLeaf->uEax, pCurLeaf->uEdx);
4412 if (fGuestXcr0Mask & ~pVM->cpum.s.fXStateHostMask)
4413 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
4414 N_("CPUID(0xd/0).EDX:EAX mismatch: %#llx saved, %#llx supported by the current host (XCR0 bits)"),
4415 fGuestXcr0Mask, pVM->cpum.s.fXStateHostMask);
4416 if ((fGuestXcr0Mask & (XSAVE_C_X87 | XSAVE_C_SSE)) != (XSAVE_C_X87 | XSAVE_C_SSE))
4417 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
4418 N_("CPUID(0xd/0).EDX:EAX missing mandatory X87 or SSE bits: %#RX64"), fGuestXcr0Mask);
4419
4420 /* We don't support any additional features yet. */
4421 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), 1);
4422 if (pCurLeaf && pCurLeaf->uEax)
4423 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
4424 N_("CPUID(0xd/1).EAX=%#x, expected zero"), pCurLeaf->uEax);
4425 if (pCurLeaf && (pCurLeaf->uEcx || pCurLeaf->uEdx))
4426 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
4427 N_("CPUID(0xd/1).EDX:ECX=%#llx, expected zero"),
4428 RT_MAKE_U64(pCurLeaf->uEdx, pCurLeaf->uEcx));
4429
4430
4431#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
4432 for (uint32_t uSubLeaf = 2; uSubLeaf < 64; uSubLeaf++)
4433 {
4434 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), uSubLeaf);
4435 if (pCurLeaf)
4436 {
4437 /* If advertised, the state component offset and size must match those used by the host. */
4438 if (pCurLeaf->uEax || pCurLeaf->uEbx || pCurLeaf->uEcx || pCurLeaf->uEdx)
4439 {
4440 CPUMCPUID RawHost;
4441 ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, uSubLeaf, 0,
4442 &RawHost.uEax, &RawHost.uEbx, &RawHost.uEcx, &RawHost.uEdx);
4443 if ( RawHost.uEbx != pCurLeaf->uEbx
4444 || RawHost.uEax != pCurLeaf->uEax)
4445 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
4446 N_("CPUID(0xd/%#x).EBX/EAX=%#x/%#x, current host uses %#x/%#x (offset/size)"),
4447 uSubLeaf, pCurLeaf->uEbx, pCurLeaf->uEax, RawHost.uEbx, RawHost.uEax);
4448 }
4449 }
4450 }
4451#endif
4452 }
4453 /* Clear leaf 0xd just in case we're loading an old state... */
4454 else if (pCurLeaf)
4455 {
4456 for (uint32_t uSubLeaf = 0; uSubLeaf < 64; uSubLeaf++)
4457 {
4458 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), uSubLeaf);
4459 if (pCurLeaf)
4460 {
4461 AssertLogRelMsg( uVersion <= CPUM_SAVED_STATE_VERSION_PUT_STRUCT
4462 || ( pCurLeaf->uEax == 0
4463 && pCurLeaf->uEbx == 0
4464 && pCurLeaf->uEcx == 0
4465 && pCurLeaf->uEdx == 0),
4466 ("uVersion=%#x; %#x %#x %#x %#x\n",
4467 uVersion, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx));
4468 pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
4469 }
4470 }
4471 }
4472
4473 /* Update the fXStateGuestMask value for the VM. */
4474 if (pVM->cpum.s.fXStateGuestMask != fGuestXcr0Mask)
4475 {
4476 LogRel(("CPUM: fXStateGuestMask=%#llx -> %#llx\n", pVM->cpum.s.fXStateGuestMask, fGuestXcr0Mask));
4477 pVM->cpum.s.fXStateGuestMask = fGuestXcr0Mask;
4478 if (!fGuestXcr0Mask && (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_XSAVE))
4479 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
4480 N_("Internal Processing Error: XSAVE feature bit enabled, but leaf 0xd is empty."));
4481 }
4482
4483#undef CPUID_CHECK_RET
4484#undef CPUID_CHECK_WRN
4485#undef CPUID_CHECK2_RET
4486#undef CPUID_CHECK2_WRN
4487#undef CPUID_RAW_FEATURE_RET
4488#undef CPUID_RAW_FEATURE_WRN
4489#undef CPUID_RAW_FEATURE_IGN
4490#undef CPUID_GST_FEATURE_RET
4491#undef CPUID_GST_FEATURE_WRN
4492#undef CPUID_GST_FEATURE_EMU
4493#undef CPUID_GST_FEATURE_IGN
4494#undef CPUID_GST_FEATURE2_RET
4495#undef CPUID_GST_FEATURE2_WRN
4496#undef CPUID_GST_FEATURE2_EMU
4497#undef CPUID_GST_FEATURE2_IGN
4498#undef CPUID_GST_AMD_FEATURE_RET
4499#undef CPUID_GST_AMD_FEATURE_WRN
4500#undef CPUID_GST_AMD_FEATURE_EMU
4501#undef CPUID_GST_AMD_FEATURE_IGN
4502
4503 /*
4504 * We're good, commit the CPU ID leaves.
4505 */
4506 pVM->cpum.s.GuestInfo.DefCpuId = GuestDefCpuId;
4507 rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves, pMsrs);
4508 AssertLogRelRCReturn(rc, rc);
4509
4510 return VINF_SUCCESS;
4511}
4512
4513
4514/**
4515 * Loads the CPU ID leaves saved by pass 0.
4516 *
4517 * @returns VBox status code.
4518 * @param pVM The cross context VM structure.
4519 * @param pSSM The saved state handle.
4520 * @param uVersion The format version.
4521 * @param pMsrs The guest MSRs.
4522 */
4523int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs)
4524{
4525 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
4526
4527 /*
4528 * Load the CPUID leaves array first and call worker to do the rest, just so
4529 * we can free the memory when we need to without ending up in column 1000.
4530 */
4531 PCPUMCPUIDLEAF paLeaves;
4532 uint32_t cLeaves;
4533 int rc = cpumR3LoadGuestCpuIdArray(pVM, pSSM, uVersion, &paLeaves, &cLeaves);
4534 AssertRC(rc);
4535 if (RT_SUCCESS(rc))
4536 {
4537 rc = cpumR3LoadCpuIdInner(pVM, pSSM, uVersion, paLeaves, cLeaves, pMsrs);
4538 RTMemFree(paLeaves);
4539 }
4540 return rc;
4541}
4542
4543
4544
4545/**
4546 * Loads the CPU ID leaves saved by pass 0 in a pre-3.2 saved state.
4547 *
4548 * @returns VBox status code.
4549 * @param pVM The cross context VM structure.
4550 * @param pSSM The saved state handle.
4551 * @param uVersion The format version.
4552 */
4553int cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
4554{
4555 AssertMsgReturn(uVersion < CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
4556
4557 /*
4558 * Restore the CPUID leaves.
4559 *
4560 * Note that we support restoring less than the current amount of standard
4561 * leaves because newer versions of VBox have been allowed more.
4562 */
4563 uint32_t cElements;
4564 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
4565 if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmStd))
4566 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
4567 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdPatmStd[0]));
4568
4569 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
4570 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmExt))
4571 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
4572 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmExt[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmExt));
4573
4574 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
4575 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmCentaur))
4576 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
4577 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmCentaur));
4578
4579 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestInfo.DefCpuId, sizeof(pVM->cpum.s.GuestInfo.DefCpuId));
4580
4581 /*
4582 * Check that the basic CPUID information is unchanged.
4583 */
4584 /** @todo we should check the 64-bit capabilities too! */
4585 uint32_t au32CpuId[8] = {0,0,0,0, 0,0,0,0};
4586#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
4587 ASMCpuIdExSlow(0, 0, 0, 0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
4588 ASMCpuIdExSlow(1, 0, 0, 0, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
4589#endif
4590 uint32_t au32CpuIdSaved[8];
4591 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
4592 if (RT_SUCCESS(rc))
4593 {
4594 /* Ignore CPU stepping. */
4595 au32CpuId[4] &= 0xfffffff0;
4596 au32CpuIdSaved[4] &= 0xfffffff0;
4597
4598 /* Ignore APIC ID (AMD specs). */
4599 au32CpuId[5] &= ~0xff000000;
4600 au32CpuIdSaved[5] &= ~0xff000000;
4601
4602 /* Ignore the number of Logical CPUs (AMD specs). */
4603 au32CpuId[5] &= ~0x00ff0000;
4604 au32CpuIdSaved[5] &= ~0x00ff0000;
4605
4606 /* Ignore some advanced capability bits that we don't expose to the guest. */
4607 au32CpuId[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
4608 | X86_CPUID_FEATURE_ECX_VMX
4609 | X86_CPUID_FEATURE_ECX_SMX
4610 | X86_CPUID_FEATURE_ECX_EST
4611 | X86_CPUID_FEATURE_ECX_TM2
4612 | X86_CPUID_FEATURE_ECX_CNTXID
4613 | X86_CPUID_FEATURE_ECX_TPRUPDATE
4614 | X86_CPUID_FEATURE_ECX_PDCM
4615 | X86_CPUID_FEATURE_ECX_DCA
4616 | X86_CPUID_FEATURE_ECX_X2APIC
4617 );
4618 au32CpuIdSaved[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
4619 | X86_CPUID_FEATURE_ECX_VMX
4620 | X86_CPUID_FEATURE_ECX_SMX
4621 | X86_CPUID_FEATURE_ECX_EST
4622 | X86_CPUID_FEATURE_ECX_TM2
4623 | X86_CPUID_FEATURE_ECX_CNTXID
4624 | X86_CPUID_FEATURE_ECX_TPRUPDATE
4625 | X86_CPUID_FEATURE_ECX_PDCM
4626 | X86_CPUID_FEATURE_ECX_DCA
4627 | X86_CPUID_FEATURE_ECX_X2APIC
4628 );
4629
4630 /* Make sure we don't forget to update the masks when enabling
4631 * features in the future.
4632 */
4633 AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx &
4634 ( X86_CPUID_FEATURE_ECX_DTES64
4635 | X86_CPUID_FEATURE_ECX_VMX
4636 | X86_CPUID_FEATURE_ECX_SMX
4637 | X86_CPUID_FEATURE_ECX_EST
4638 | X86_CPUID_FEATURE_ECX_TM2
4639 | X86_CPUID_FEATURE_ECX_CNTXID
4640 | X86_CPUID_FEATURE_ECX_TPRUPDATE
4641 | X86_CPUID_FEATURE_ECX_PDCM
4642 | X86_CPUID_FEATURE_ECX_DCA
4643 | X86_CPUID_FEATURE_ECX_X2APIC
4644 )));
4645 /* do the compare */
4646 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
4647 {
4648 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
4649 LogRel(("cpumR3LoadExec: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
4650 "Saved=%.*Rhxs\n"
4651 "Real =%.*Rhxs\n",
4652 sizeof(au32CpuIdSaved), au32CpuIdSaved,
4653 sizeof(au32CpuId), au32CpuId));
4654 else
4655 {
4656 LogRel(("cpumR3LoadExec: CpuId mismatch!\n"
4657 "Saved=%.*Rhxs\n"
4658 "Real =%.*Rhxs\n",
4659 sizeof(au32CpuIdSaved), au32CpuIdSaved,
4660 sizeof(au32CpuId), au32CpuId));
4661 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
4662 }
4663 }
4664 }
4665
4666 return rc;
4667}
4668
4669
4670
4671/*
4672 *
4673 *
4674 * CPUID Info Handler.
4675 * CPUID Info Handler.
4676 * CPUID Info Handler.
4677 *
4678 *
4679 */
4680
4681
4682
4683/**
4684 * Get L1 cache / TLB associativity.
4685 */
4686static const char *getCacheAss(unsigned u, char *pszBuf)
4687{
4688 if (u == 0)
4689 return "res0 ";
4690 if (u == 1)
4691 return "direct";
4692 if (u == 255)
4693 return "fully";
4694 if (u >= 256)
4695 return "???";
4696
4697 RTStrPrintf(pszBuf, 16, "%d way", u);
4698 return pszBuf;
4699}
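/* Usage sketch (illustrative): the special encodings return static strings, any
 * other value is formatted into the caller's buffer, which must hold at least
 * 16 bytes (callers below pass a char[32]):
 *
 *     char szBuf[32];
 *     const char *psz = getCacheAss(1, szBuf);   // returns "direct"
 *     psz = getCacheAss(4, szBuf);               // formats and returns "4 way"
 */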
4700
4701
4702/**
4703 * Get L2 cache associativity.
4704 */
4705static const char *getL2CacheAss(unsigned u)
4706{
4707 switch (u)
4708 {
4709 case 0: return "off ";
4710 case 1: return "direct";
4711 case 2: return "2 way ";
4712 case 3: return "res3 ";
4713 case 4: return "4 way ";
4714 case 5: return "res5 ";
4715 case 6: return "8 way ";
4716 case 7: return "res7 ";
4717 case 8: return "16 way";
4718 case 9: return "res9 ";
4719 case 10: return "res10 ";
4720 case 11: return "res11 ";
4721 case 12: return "res12 ";
4722 case 13: return "res13 ";
4723 case 14: return "res14 ";
4724 case 15: return "fully ";
4725 default: return "????";
4726 }
4727}
4728
4729
4730/** CPUID(1).EDX field descriptions. */
4731static DBGFREGSUBFIELD const g_aLeaf1EdxSubFields[] =
4732{
4733 DBGFREGSUBFIELD_RO("FPU\0" "x87 FPU on Chip", 0, 1, 0),
4734 DBGFREGSUBFIELD_RO("VME\0" "Virtual 8086 Mode Enhancements", 1, 1, 0),
4735 DBGFREGSUBFIELD_RO("DE\0" "Debugging extensions", 2, 1, 0),
4736 DBGFREGSUBFIELD_RO("PSE\0" "Page Size Extension", 3, 1, 0),
4737 DBGFREGSUBFIELD_RO("TSC\0" "Time Stamp Counter", 4, 1, 0),
4738 DBGFREGSUBFIELD_RO("MSR\0" "Model Specific Registers", 5, 1, 0),
4739 DBGFREGSUBFIELD_RO("PAE\0" "Physical Address Extension", 6, 1, 0),
4740 DBGFREGSUBFIELD_RO("MCE\0" "Machine Check Exception", 7, 1, 0),
4741 DBGFREGSUBFIELD_RO("CX8\0" "CMPXCHG8B instruction", 8, 1, 0),
4742 DBGFREGSUBFIELD_RO("APIC\0" "APIC On-Chip", 9, 1, 0),
4743 DBGFREGSUBFIELD_RO("SEP\0" "SYSENTER and SYSEXIT Present", 11, 1, 0),
4744 DBGFREGSUBFIELD_RO("MTRR\0" "Memory Type Range Registers", 12, 1, 0),
4745 DBGFREGSUBFIELD_RO("PGE\0" "PTE Global Bit", 13, 1, 0),
4746 DBGFREGSUBFIELD_RO("MCA\0" "Machine Check Architecture", 14, 1, 0),
4747 DBGFREGSUBFIELD_RO("CMOV\0" "Conditional Move instructions", 15, 1, 0),
4748 DBGFREGSUBFIELD_RO("PAT\0" "Page Attribute Table", 16, 1, 0),
4749 DBGFREGSUBFIELD_RO("PSE-36\0" "36-bit Page Size Extension", 17, 1, 0),
4750 DBGFREGSUBFIELD_RO("PSN\0" "Processor Serial Number", 18, 1, 0),
4751 DBGFREGSUBFIELD_RO("CLFSH\0" "CLFLUSH instruction", 19, 1, 0),
4752 DBGFREGSUBFIELD_RO("DS\0" "Debug Store", 21, 1, 0),
4753 DBGFREGSUBFIELD_RO("ACPI\0" "Thermal Mon. & Soft. Clock Ctrl.", 22, 1, 0),
4754 DBGFREGSUBFIELD_RO("MMX\0" "Intel MMX Technology", 23, 1, 0),
4755 DBGFREGSUBFIELD_RO("FXSR\0" "FXSAVE and FXRSTOR instructions", 24, 1, 0),
4756 DBGFREGSUBFIELD_RO("SSE\0" "SSE support", 25, 1, 0),
4757 DBGFREGSUBFIELD_RO("SSE2\0" "SSE2 support", 26, 1, 0),
4758 DBGFREGSUBFIELD_RO("SS\0" "Self Snoop", 27, 1, 0),
4759 DBGFREGSUBFIELD_RO("HTT\0" "Hyper-Threading Technology", 28, 1, 0),
4760 DBGFREGSUBFIELD_RO("TM\0" "Therm. Monitor", 29, 1, 0),
4761 DBGFREGSUBFIELD_RO("PBE\0" "Pending Break Enabled", 31, 1, 0),
4762 DBGFREGSUBFIELD_TERMINATOR()
4763};
4764
4765/** CPUID(1).ECX field descriptions. */
4766static DBGFREGSUBFIELD const g_aLeaf1EcxSubFields[] =
4767{
4768 DBGFREGSUBFIELD_RO("SSE3\0" "SSE3 support", 0, 1, 0),
4769 DBGFREGSUBFIELD_RO("PCLMUL\0" "PCLMULQDQ support (for AES-GCM)", 1, 1, 0),
4770 DBGFREGSUBFIELD_RO("DTES64\0" "DS Area 64-bit Layout", 2, 1, 0),
4771 DBGFREGSUBFIELD_RO("MONITOR\0" "MONITOR/MWAIT instructions", 3, 1, 0),
4772 DBGFREGSUBFIELD_RO("CPL-DS\0" "CPL Qualified Debug Store", 4, 1, 0),
4773 DBGFREGSUBFIELD_RO("VMX\0" "Virtual Machine Extensions", 5, 1, 0),
4774 DBGFREGSUBFIELD_RO("SMX\0" "Safer Mode Extensions", 6, 1, 0),
4775 DBGFREGSUBFIELD_RO("EST\0" "Enhanced SpeedStep Technology", 7, 1, 0),
4776 DBGFREGSUBFIELD_RO("TM2\0" "Terminal Monitor 2", 8, 1, 0),
4777 DBGFREGSUBFIELD_RO("SSSE3\0" "Supplemental Streaming SIMD Extensions 3", 9, 1, 0),
4778 DBGFREGSUBFIELD_RO("CNTX-ID\0" "L1 Context ID", 10, 1, 0),
4779 DBGFREGSUBFIELD_RO("SDBG\0" "Silicon Debug interface", 11, 1, 0),
4780 DBGFREGSUBFIELD_RO("FMA\0" "Fused Multiply Add extensions", 12, 1, 0),
4781 DBGFREGSUBFIELD_RO("CX16\0" "CMPXCHG16B instruction", 13, 1, 0),
4782 DBGFREGSUBFIELD_RO("TPRUPDATE\0" "xTPR Update Control", 14, 1, 0),
4783 DBGFREGSUBFIELD_RO("PDCM\0" "Perf/Debug Capability MSR", 15, 1, 0),
4784 DBGFREGSUBFIELD_RO("PCID\0" "Process Context Identifiers", 17, 1, 0),
4785 DBGFREGSUBFIELD_RO("DCA\0" "Direct Cache Access", 18, 1, 0),
4786 DBGFREGSUBFIELD_RO("SSE4_1\0" "SSE4_1 support", 19, 1, 0),
4787 DBGFREGSUBFIELD_RO("SSE4_2\0" "SSE4_2 support", 20, 1, 0),
4788 DBGFREGSUBFIELD_RO("X2APIC\0" "x2APIC support", 21, 1, 0),
4789 DBGFREGSUBFIELD_RO("MOVBE\0" "MOVBE instruction", 22, 1, 0),
4790 DBGFREGSUBFIELD_RO("POPCNT\0" "POPCNT instruction", 23, 1, 0),
4791 DBGFREGSUBFIELD_RO("TSCDEADL\0" "Time Stamp Counter Deadline", 24, 1, 0),
4792 DBGFREGSUBFIELD_RO("AES\0" "AES instructions", 25, 1, 0),
4793 DBGFREGSUBFIELD_RO("XSAVE\0" "XSAVE instruction", 26, 1, 0),
4794 DBGFREGSUBFIELD_RO("OSXSAVE\0" "OSXSAVE instruction", 27, 1, 0),
4795 DBGFREGSUBFIELD_RO("AVX\0" "AVX support", 28, 1, 0),
4796 DBGFREGSUBFIELD_RO("F16C\0" "16-bit floating point conversion instructions", 29, 1, 0),
4797 DBGFREGSUBFIELD_RO("RDRAND\0" "RDRAND instruction", 30, 1, 0),
4798 DBGFREGSUBFIELD_RO("HVP\0" "Hypervisor Present (we're a guest)", 31, 1, 0),
4799 DBGFREGSUBFIELD_TERMINATOR()
4800};
4801
4802/** CPUID(7,0).EBX field descriptions. */
4803static DBGFREGSUBFIELD const g_aLeaf7Sub0EbxSubFields[] =
4804{
4805 DBGFREGSUBFIELD_RO("FSGSBASE\0" "RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE instr.", 0, 1, 0),
4806 DBGFREGSUBFIELD_RO("TSCADJUST\0" "Supports MSR_IA32_TSC_ADJUST", 1, 1, 0),
4807 DBGFREGSUBFIELD_RO("SGX\0" "Supports Software Guard Extensions", 2, 1, 0),
4808 DBGFREGSUBFIELD_RO("BMI1\0" "Advanced Bit Manipulation extension 1", 3, 1, 0),
4809 DBGFREGSUBFIELD_RO("HLE\0" "Hardware Lock Elision", 4, 1, 0),
4810 DBGFREGSUBFIELD_RO("AVX2\0" "Advanced Vector Extensions 2", 5, 1, 0),
4811 DBGFREGSUBFIELD_RO("FDP_EXCPTN_ONLY\0" "FPU DP only updated on exceptions", 6, 1, 0),
4812 DBGFREGSUBFIELD_RO("SMEP\0" "Supervisor Mode Execution Prevention", 7, 1, 0),
4813 DBGFREGSUBFIELD_RO("BMI2\0" "Advanced Bit Manipulation extension 2", 8, 1, 0),
4814 DBGFREGSUBFIELD_RO("ERMS\0" "Enhanced REP MOVSB/STOSB instructions", 9, 1, 0),
4815 DBGFREGSUBFIELD_RO("INVPCID\0" "INVPCID instruction", 10, 1, 0),
4816 DBGFREGSUBFIELD_RO("RTM\0" "Restricted Transactional Memory", 11, 1, 0),
4817 DBGFREGSUBFIELD_RO("PQM\0" "Platform Quality of Service Monitoring", 12, 1, 0),
4818 DBGFREGSUBFIELD_RO("DEPFPU_CS_DS\0" "Deprecates FPU CS, FPU DS values if set", 13, 1, 0),
4819 DBGFREGSUBFIELD_RO("MPE\0" "Intel Memory Protection Extensions", 14, 1, 0),
4820 DBGFREGSUBFIELD_RO("PQE\0" "Platform Quality of Service Enforcement", 15, 1, 0),
4821 DBGFREGSUBFIELD_RO("AVX512F\0" "AVX512 Foundation instructions", 16, 1, 0),
4822 DBGFREGSUBFIELD_RO("RDSEED\0" "RDSEED instruction", 18, 1, 0),
4823 DBGFREGSUBFIELD_RO("ADX\0" "ADCX/ADOX instructions", 19, 1, 0),
4824 DBGFREGSUBFIELD_RO("SMAP\0" "Supervisor Mode Access Prevention", 20, 1, 0),
4825 DBGFREGSUBFIELD_RO("CLFLUSHOPT\0" "CLFLUSHOPT (Cache Line Flush) instruction", 23, 1, 0),
4826 DBGFREGSUBFIELD_RO("INTEL_PT\0" "Intel Processor Trace", 25, 1, 0),
4827 DBGFREGSUBFIELD_RO("AVX512PF\0" "AVX512 Prefetch instructions", 26, 1, 0),
4828 DBGFREGSUBFIELD_RO("AVX512ER\0" "AVX512 Exponential & Reciprocal instructions", 27, 1, 0),
4829 DBGFREGSUBFIELD_RO("AVX512CD\0" "AVX512 Conflict Detection instructions", 28, 1, 0),
4830 DBGFREGSUBFIELD_RO("SHA\0" "Secure Hash Algorithm extensions", 29, 1, 0),
4831 DBGFREGSUBFIELD_TERMINATOR()
4832};
4833
4834/** CPUID(7,0).ECX field descriptions. */
4835static DBGFREGSUBFIELD const g_aLeaf7Sub0EcxSubFields[] =
4836{
4837 DBGFREGSUBFIELD_RO("PREFETCHWT1\0" "PREFETCHWT1 instruction", 0, 1, 0),
4838 DBGFREGSUBFIELD_RO("UMIP\0" "User mode insturction prevention", 2, 1, 0),
4839 DBGFREGSUBFIELD_RO("PKU\0" "Protection Key for Usermode pages", 3, 1, 0),
4840 DBGFREGSUBFIELD_RO("OSPKE\0" "CR4.PKU mirror", 4, 1, 0),
4841 DBGFREGSUBFIELD_RO("MAWAU\0" "Value used by BNDLDX & BNDSTX", 17, 5, 0),
4842 DBGFREGSUBFIELD_RO("RDPID\0" "Read processor ID support", 22, 1, 0),
4843 DBGFREGSUBFIELD_RO("SGX_LC\0" "Supports SGX Launch Configuration", 30, 1, 0),
4844 DBGFREGSUBFIELD_TERMINATOR()
4845};
4846
4847/** CPUID(7,0).EDX field descriptions. */
4848static DBGFREGSUBFIELD const g_aLeaf7Sub0EdxSubFields[] =
4849{
4850 DBGFREGSUBFIELD_RO("MD_CLEAR\0" "Supports MDS related buffer clearing", 10, 1, 0),
4851 DBGFREGSUBFIELD_RO("IBRS_IBPB\0" "IA32_SPEC_CTRL.IBRS and IA32_PRED_CMD.IBPB", 26, 1, 0),
4852 DBGFREGSUBFIELD_RO("STIBP\0" "Supports IA32_SPEC_CTRL.STIBP", 27, 1, 0),
4853 DBGFREGSUBFIELD_RO("FLUSH_CMD\0" "Supports IA32_FLUSH_CMD", 28, 1, 0),
4854 DBGFREGSUBFIELD_RO("ARCHCAP\0" "Supports IA32_ARCH_CAP", 29, 1, 0),
4855 DBGFREGSUBFIELD_RO("CORECAP\0" "Supports IA32_CORE_CAP", 30, 1, 0),
4856 DBGFREGSUBFIELD_RO("SSBD\0" "Supports IA32_SPEC_CTRL.SSBD", 31, 1, 0),
4857 DBGFREGSUBFIELD_TERMINATOR()
4858};
4859
4860
4861/** CPUID(13,0).EAX+EDX, XCR0, ++ bit descriptions. */
4862static DBGFREGSUBFIELD const g_aXSaveStateBits[] =
4863{
4864 DBGFREGSUBFIELD_RO("x87\0" "Legacy FPU state", 0, 1, 0),
4865 DBGFREGSUBFIELD_RO("SSE\0" "128-bit SSE state", 1, 1, 0),
4866 DBGFREGSUBFIELD_RO("YMM_Hi128\0" "Upper 128 bits of YMM0-15 (AVX)", 2, 1, 0),
4867 DBGFREGSUBFIELD_RO("BNDREGS\0" "MPX bound register state", 3, 1, 0),
4868 DBGFREGSUBFIELD_RO("BNDCSR\0" "MPX bound config and status state", 4, 1, 0),
4869 DBGFREGSUBFIELD_RO("Opmask\0" "opmask state", 5, 1, 0),
4870 DBGFREGSUBFIELD_RO("ZMM_Hi256\0" "Upper 256 bits of ZMM0-15 (AVX-512)", 6, 1, 0),
4871 DBGFREGSUBFIELD_RO("Hi16_ZMM\0" "512-bits ZMM16-31 state (AVX-512)", 7, 1, 0),
4872 DBGFREGSUBFIELD_RO("LWP\0" "Lightweight Profiling (AMD)", 62, 1, 0),
4873 DBGFREGSUBFIELD_TERMINATOR()
4874};
4875
4876/** CPUID(13,1).EAX field descriptions. */
4877static DBGFREGSUBFIELD const g_aLeaf13Sub1EaxSubFields[] =
4878{
4879 DBGFREGSUBFIELD_RO("XSAVEOPT\0" "XSAVEOPT is available", 0, 1, 0),
4880 DBGFREGSUBFIELD_RO("XSAVEC\0" "XSAVEC and compacted XRSTOR supported", 1, 1, 0),
4881 DBGFREGSUBFIELD_RO("XGETBC1\0" "XGETBV with ECX=1 supported", 2, 1, 0),
4882 DBGFREGSUBFIELD_RO("XSAVES\0" "XSAVES/XRSTORS and IA32_XSS supported", 3, 1, 0),
4883 DBGFREGSUBFIELD_TERMINATOR()
4884};
4885
4886
4887/** CPUID(0x80000001,0).EDX field descriptions. */
4888static DBGFREGSUBFIELD const g_aExtLeaf1EdxSubFields[] =
4889{
4890 DBGFREGSUBFIELD_RO("FPU\0" "x87 FPU on Chip", 0, 1, 0),
4891 DBGFREGSUBFIELD_RO("VME\0" "Virtual 8086 Mode Enhancements", 1, 1, 0),
4892 DBGFREGSUBFIELD_RO("DE\0" "Debugging extensions", 2, 1, 0),
4893 DBGFREGSUBFIELD_RO("PSE\0" "Page Size Extension", 3, 1, 0),
4894 DBGFREGSUBFIELD_RO("TSC\0" "Time Stamp Counter", 4, 1, 0),
4895 DBGFREGSUBFIELD_RO("MSR\0" "K86 Model Specific Registers", 5, 1, 0),
4896 DBGFREGSUBFIELD_RO("PAE\0" "Physical Address Extension", 6, 1, 0),
4897 DBGFREGSUBFIELD_RO("MCE\0" "Machine Check Exception", 7, 1, 0),
4898 DBGFREGSUBFIELD_RO("CX8\0" "CMPXCHG8B instruction", 8, 1, 0),
4899 DBGFREGSUBFIELD_RO("APIC\0" "APIC On-Chip", 9, 1, 0),
4900 DBGFREGSUBFIELD_RO("SEP\0" "SYSCALL/SYSRET", 11, 1, 0),
4901 DBGFREGSUBFIELD_RO("MTRR\0" "Memory Type Range Registers", 12, 1, 0),
4902 DBGFREGSUBFIELD_RO("PGE\0" "PTE Global Bit", 13, 1, 0),
4903 DBGFREGSUBFIELD_RO("MCA\0" "Machine Check Architecture", 14, 1, 0),
4904 DBGFREGSUBFIELD_RO("CMOV\0" "Conditional Move instructions", 15, 1, 0),
4905 DBGFREGSUBFIELD_RO("PAT\0" "Page Attribute Table", 16, 1, 0),
4906 DBGFREGSUBFIELD_RO("PSE-36\0" "36-bit Page Size Extension", 17, 1, 0),
4907 DBGFREGSUBFIELD_RO("NX\0" "No-Execute/Execute-Disable", 20, 1, 0),
4908 DBGFREGSUBFIELD_RO("AXMMX\0" "AMD Extensions to MMX instructions", 22, 1, 0),
4909 DBGFREGSUBFIELD_RO("MMX\0" "Intel MMX Technology", 23, 1, 0),
4910 DBGFREGSUBFIELD_RO("FXSR\0" "FXSAVE and FXRSTOR Instructions", 24, 1, 0),
4911 DBGFREGSUBFIELD_RO("FFXSR\0" "AMD fast FXSAVE and FXRSTOR instructions", 25, 1, 0),
4912 DBGFREGSUBFIELD_RO("Page1GB\0" "1 GB large page", 26, 1, 0),
4913 DBGFREGSUBFIELD_RO("RDTSCP\0" "RDTSCP instruction", 27, 1, 0),
4914 DBGFREGSUBFIELD_RO("LM\0" "AMD64 Long Mode", 29, 1, 0),
4915 DBGFREGSUBFIELD_RO("3DNOWEXT\0" "AMD Extensions to 3DNow", 30, 1, 0),
4916 DBGFREGSUBFIELD_RO("3DNOW\0" "AMD 3DNow", 31, 1, 0),
4917 DBGFREGSUBFIELD_TERMINATOR()
4918};
4919
4920/** CPUID(0x80000001,0).ECX field descriptions. */
4921static DBGFREGSUBFIELD const g_aExtLeaf1EcxSubFields[] =
4922{
4923 DBGFREGSUBFIELD_RO("LahfSahf\0" "LAHF/SAHF support in 64-bit mode", 0, 1, 0),
4924 DBGFREGSUBFIELD_RO("CmpLegacy\0" "Core multi-processing legacy mode", 1, 1, 0),
4925 DBGFREGSUBFIELD_RO("SVM\0" "AMD Secure Virtual Machine extensions", 2, 1, 0),
4926 DBGFREGSUBFIELD_RO("EXTAPIC\0" "AMD Extended APIC registers", 3, 1, 0),
4927 DBGFREGSUBFIELD_RO("CR8L\0" "AMD LOCK MOV CR0 means MOV CR8", 4, 1, 0),
4928 DBGFREGSUBFIELD_RO("ABM\0" "AMD Advanced Bit Manipulation", 5, 1, 0),
4929 DBGFREGSUBFIELD_RO("SSE4A\0" "SSE4A instructions", 6, 1, 0),
4930 DBGFREGSUBFIELD_RO("MISALIGNSSE\0" "AMD Misaligned SSE mode", 7, 1, 0),
4931 DBGFREGSUBFIELD_RO("3DNOWPRF\0" "AMD PREFETCH and PREFETCHW instructions", 8, 1, 0),
4932 DBGFREGSUBFIELD_RO("OSVW\0" "AMD OS Visible Workaround", 9, 1, 0),
4933 DBGFREGSUBFIELD_RO("IBS\0" "Instruct Based Sampling", 10, 1, 0),
4934 DBGFREGSUBFIELD_RO("XOP\0" "Extended Operation support", 11, 1, 0),
4935 DBGFREGSUBFIELD_RO("SKINIT\0" "SKINIT, STGI, and DEV support", 12, 1, 0),
4936 DBGFREGSUBFIELD_RO("WDT\0" "AMD Watchdog Timer support", 13, 1, 0),
4937 DBGFREGSUBFIELD_RO("LWP\0" "Lightweight Profiling support", 15, 1, 0),
4938 DBGFREGSUBFIELD_RO("FMA4\0" "Four operand FMA instruction support", 16, 1, 0),
4939 DBGFREGSUBFIELD_RO("NodeId\0" "NodeId in MSR C001_100C", 19, 1, 0),
4940 DBGFREGSUBFIELD_RO("TBM\0" "Trailing Bit Manipulation instructions", 21, 1, 0),
4941 DBGFREGSUBFIELD_RO("TOPOEXT\0" "Topology Extensions", 22, 1, 0),
4942 DBGFREGSUBFIELD_RO("PRFEXTCORE\0" "Performance Counter Extensions support", 23, 1, 0),
4943 DBGFREGSUBFIELD_RO("PRFEXTNB\0" "NB Performance Counter Extensions support", 24, 1, 0),
4944 DBGFREGSUBFIELD_RO("DATABPEXT\0" "Data-access Breakpoint Extension", 26, 1, 0),
4945 DBGFREGSUBFIELD_RO("PERFTSC\0" "Performance Time Stamp Counter", 27, 1, 0),
4946 DBGFREGSUBFIELD_RO("PCX_L2I\0" "L2I/L3 Performance Counter Extensions", 28, 1, 0),
4947 DBGFREGSUBFIELD_RO("MWAITX\0" "MWAITX and MONITORX instructions", 29, 1, 0),
4948 DBGFREGSUBFIELD_TERMINATOR()
4949};
4950
4951/** CPUID(0x8000000a,0).EDX field descriptions. */
4952static DBGFREGSUBFIELD const g_aExtLeafAEdxSubFields[] =
4953{
4954 DBGFREGSUBFIELD_RO("NP\0" "Nested Paging", 0, 1, 0),
4955 DBGFREGSUBFIELD_RO("LbrVirt\0" "Last Branch Record Virtualization", 1, 1, 0),
4956 DBGFREGSUBFIELD_RO("SVML\0" "SVM Lock", 2, 1, 0),
4957 DBGFREGSUBFIELD_RO("NRIPS\0" "NextRIP Save", 3, 1, 0),
4958 DBGFREGSUBFIELD_RO("TscRateMsr\0" "MSR based TSC rate control", 4, 1, 0),
4959 DBGFREGSUBFIELD_RO("VmcbClean\0" "VMCB clean bits", 5, 1, 0),
4960 DBGFREGSUBFIELD_RO("FlushByASID\0" "Flush by ASID", 6, 1, 0),
4961 DBGFREGSUBFIELD_RO("DecodeAssists\0" "Decode Assists", 7, 1, 0),
4962 DBGFREGSUBFIELD_RO("PauseFilter\0" "Pause intercept filter", 10, 1, 0),
4963 DBGFREGSUBFIELD_RO("PauseFilterThreshold\0" "Pause filter threshold", 12, 1, 0),
4964 DBGFREGSUBFIELD_RO("AVIC\0" "Advanced Virtual Interrupt Controller", 13, 1, 0),
4965 DBGFREGSUBFIELD_RO("VMSAVEVirt\0" "VMSAVE and VMLOAD Virtualization", 15, 1, 0),
4966 DBGFREGSUBFIELD_RO("VGIF\0" "Virtual Global-Interrupt Flag", 16, 1, 0),
4967 DBGFREGSUBFIELD_RO("GMET\0" "Guest Mode Execute Trap Extension", 17, 1, 0),
4968 DBGFREGSUBFIELD_TERMINATOR()
4969};
4970
4971
4972/** CPUID(0x80000007,0).EDX field descriptions. */
4973static DBGFREGSUBFIELD const g_aExtLeaf7EdxSubFields[] =
4974{
4975 DBGFREGSUBFIELD_RO("TS\0" "Temperature Sensor", 0, 1, 0),
4976 DBGFREGSUBFIELD_RO("FID\0" "Frequency ID control", 1, 1, 0),
4977 DBGFREGSUBFIELD_RO("VID\0" "Voltage ID control", 2, 1, 0),
4978 DBGFREGSUBFIELD_RO("VID\0" "Voltage ID control", 2, 1, 0),
4979 DBGFREGSUBFIELD_RO("TTP\0" "Thermal Trip", 3, 1, 0),
4980 DBGFREGSUBFIELD_RO("TM\0" "Hardware Thermal Control (HTC)", 4, 1, 0),
4981 DBGFREGSUBFIELD_RO("100MHzSteps\0" "100 MHz Multiplier control", 6, 1, 0),
4982 DBGFREGSUBFIELD_RO("HwPstate\0" "Hardware P-state control", 7, 1, 0),
4983 DBGFREGSUBFIELD_RO("TscInvariant\0" "Invariant Time Stamp Counter", 8, 1, 0),
4984 DBGFREGSUBFIELD_RO("CBP\0" "Core Performance Boost", 9, 1, 0),
4985 DBGFREGSUBFIELD_RO("EffFreqRO\0" "Read-only Effective Frequency Interface", 10, 1, 0),
4986 DBGFREGSUBFIELD_RO("ProcFdbkIf\0" "Processor Feedback Interface", 11, 1, 0),
4987 DBGFREGSUBFIELD_RO("ProcPwrRep\0" "Core power reporting interface support", 12, 1, 0),
4988 DBGFREGSUBFIELD_TERMINATOR()
4989};
4990
4991/** CPUID(0x80000008,0).EBX field descriptions. */
4992static DBGFREGSUBFIELD const g_aExtLeaf8EbxSubFields[] =
4993{
4994 DBGFREGSUBFIELD_RO("CLZERO\0" "Clear zero instruction (cacheline)", 0, 1, 0),
4995 DBGFREGSUBFIELD_RO("IRPerf\0" "Instructions retired count support", 1, 1, 0),
4996 DBGFREGSUBFIELD_RO("XSaveErPtr\0" "Save/restore error pointers (FXSAVE/RSTOR*)", 2, 1, 0),
4997 DBGFREGSUBFIELD_RO("RDPRU\0" "RDPRU instruction", 4, 1, 0),
4998 DBGFREGSUBFIELD_RO("MCOMMIT\0" "MCOMMIT instruction", 8, 1, 0),
4999 DBGFREGSUBFIELD_RO("IBPB\0" "Supports the IBPB command in IA32_PRED_CMD", 12, 1, 0),
5000 DBGFREGSUBFIELD_TERMINATOR()
5001};
5002
5003
5004static void cpumR3CpuIdInfoMnemonicListU32(PCDBGFINFOHLP pHlp, uint32_t uVal, PCDBGFREGSUBFIELD pDesc,
5005 const char *pszLeadIn, uint32_t cchWidth)
5006{
5007 if (pszLeadIn)
5008 pHlp->pfnPrintf(pHlp, "%*s", cchWidth, pszLeadIn);
5009
5010 for (uint32_t iBit = 0; iBit < 32; iBit++)
5011 if (RT_BIT_32(iBit) & uVal)
5012 {
5013 while ( pDesc->pszName != NULL
5014 && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
5015 pDesc++;
5016 if ( pDesc->pszName != NULL
5017 && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
5018 {
5019 if (pDesc->cBits == 1)
5020 pHlp->pfnPrintf(pHlp, " %s", pDesc->pszName);
5021 else
5022 {
5023 uint32_t uFieldValue = uVal >> pDesc->iFirstBit;
5024 if (pDesc->cBits < 32)
5025 uFieldValue &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
5026 pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s=%u" : " %s=%#x", pDesc->pszName, uFieldValue);
5027 iBit = pDesc->iFirstBit + pDesc->cBits - 1;
5028 }
5029 }
5030 else
5031 pHlp->pfnPrintf(pHlp, " %u", iBit);
5032 }
5033 if (pszLeadIn)
5034 pHlp->pfnPrintf(pHlp, "\n");
5035}
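/* Example (illustrative): with uVal = 0x00000005 and g_aLeaf1EdxSubFields this
 * appends one mnemonic per set bit that has a descriptor:
 *
 *     cpumR3CpuIdInfoMnemonicListU32(pHlp, UINT32_C(0x5), g_aLeaf1EdxSubFields, "Features EDX:", 36);
 *     // emits the right-aligned lead-in followed by " FPU DE"
 *
 * Set bits without a descriptor are printed as their raw bit number instead.
 */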
5036
5037
5038static void cpumR3CpuIdInfoMnemonicListU64(PCDBGFINFOHLP pHlp, uint64_t uVal, PCDBGFREGSUBFIELD pDesc,
5039 const char *pszLeadIn, uint32_t cchWidth)
5040{
5041 if (pszLeadIn)
5042 pHlp->pfnPrintf(pHlp, "%*s", cchWidth, pszLeadIn);
5043
5044 for (uint32_t iBit = 0; iBit < 64; iBit++)
5045 if (RT_BIT_64(iBit) & uVal)
5046 {
5047 while ( pDesc->pszName != NULL
5048 && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
5049 pDesc++;
5050 if ( pDesc->pszName != NULL
5051 && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
5052 {
5053 if (pDesc->cBits == 1)
5054 pHlp->pfnPrintf(pHlp, " %s", pDesc->pszName);
5055 else
5056 {
5057 uint64_t uFieldValue = uVal >> pDesc->iFirstBit;
5058 if (pDesc->cBits < 64)
5059 uFieldValue &= RT_BIT_64(pDesc->cBits) - UINT64_C(1);
5060 pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s=%llu" : " %s=%#llx", pDesc->pszName, uFieldValue);
5061 iBit = pDesc->iFirstBit + pDesc->cBits - 1;
5062 }
5063 }
5064 else
5065 pHlp->pfnPrintf(pHlp, " %u", iBit);
5066 }
5067 if (pszLeadIn)
5068 pHlp->pfnPrintf(pHlp, "\n");
5069}
5070
5071
5072static void cpumR3CpuIdInfoValueWithMnemonicListU64(PCDBGFINFOHLP pHlp, uint64_t uVal, PCDBGFREGSUBFIELD pDesc,
5073 const char *pszLeadIn, uint32_t cchWidth)
5074{
5075 if (!uVal)
5076 pHlp->pfnPrintf(pHlp, "%*s %#010x`%08x\n", cchWidth, pszLeadIn, RT_HI_U32(uVal), RT_LO_U32(uVal));
5077 else
5078 {
5079 pHlp->pfnPrintf(pHlp, "%*s %#010x`%08x (", cchWidth, pszLeadIn, RT_HI_U32(uVal), RT_LO_U32(uVal));
5080 cpumR3CpuIdInfoMnemonicListU64(pHlp, uVal, pDesc, NULL, 0);
5081 pHlp->pfnPrintf(pHlp, " )\n");
5082 }
5083}
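/* Example (illustrative): a non-zero value is followed by its decoded mnemonic
 * list, e.g. uVal = 0x7 with g_aXSaveStateBits produces a line of the form
 *
 *     Valid XCR0 bits, host:  0x00000000`00000007 ( x87 SSE YMM_Hi128 )
 *
 * while a zero value is printed without the parenthesized list.
 */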
5084
5085
5086static void cpumR3CpuIdInfoVerboseCompareListU32(PCDBGFINFOHLP pHlp, uint32_t uVal1, uint32_t uVal2, PCDBGFREGSUBFIELD pDesc,
5087 uint32_t cchWidth)
5088{
5089 uint32_t uCombined = uVal1 | uVal2;
5090 for (uint32_t iBit = 0; iBit < 32; iBit++)
5091 if ( (RT_BIT_32(iBit) & uCombined)
5092 || (iBit == pDesc->iFirstBit && pDesc->pszName) )
5093 {
5094 while ( pDesc->pszName != NULL
5095 && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
5096 pDesc++;
5097
5098 if ( pDesc->pszName != NULL
5099 && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
5100 {
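                /* DBGFREGSUBFIELD packs "Mnemonic\0Description" into a single
                   string; the description starts right past the mnemonic's NUL
                   terminator, which the pointer arithmetic below relies on. */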
5101 size_t cchMnemonic = strlen(pDesc->pszName);
5102 const char *pszDesc = pDesc->pszName + cchMnemonic + 1;
5103 size_t cchDesc = strlen(pszDesc);
5104 uint32_t uFieldValue1 = uVal1 >> pDesc->iFirstBit;
5105 uint32_t uFieldValue2 = uVal2 >> pDesc->iFirstBit;
5106 if (pDesc->cBits < 32)
5107 {
5108 uFieldValue1 &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
5109 uFieldValue2 &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
5110 }
5111
5112 pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s - %s%*s= %u (%u)\n" : " %s - %s%*s= %#x (%#x)\n",
5113 pDesc->pszName, pszDesc,
5114 cchMnemonic + 3 + cchDesc < cchWidth ? cchWidth - (cchMnemonic + 3 + cchDesc) : 1, "",
5115 uFieldValue1, uFieldValue2);
5116
5117 iBit = pDesc->iFirstBit + pDesc->cBits - 1U;
5118 pDesc++;
5119 }
5120 else
5121 pHlp->pfnPrintf(pHlp, " %2u - Reserved%*s= %u (%u)\n", iBit, 13 < cchWidth ? cchWidth - 13 : 1, "",
5122 RT_BOOL(uVal1 & RT_BIT_32(iBit)), RT_BOOL(uVal2 & RT_BIT_32(iBit)));
5123 }
5124}
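/* Example (illustrative): with bit 0 of CPUID(1).EDX set on both guest and host,
 * the compare list prints a line of the form
 *
 *     FPU - x87 FPU on Chip                          = 1 (1)
 *
 * while set bits lacking a descriptor appear as "NN - Reserved = x (y)".
 */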
5125
5126
5127/**
5128 * Produces a detailed summary of standard leaf 0x00000001.
5129 *
5130 * @param pHlp The info helper functions.
5131 * @param pCurLeaf The 0x00000001 leaf.
5132 * @param fVerbose Whether to be very verbose or not.
5133 * @param fIntel Set if intel CPU.
5134 */
5135static void cpumR3CpuIdInfoStdLeaf1Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose, bool fIntel)
5136{
5137 Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 1);
5138 static const char * const s_apszTypes[4] = { "primary", "overdrive", "MP", "reserved" };
5139 uint32_t uEAX = pCurLeaf->uEax;
5140 uint32_t uEBX = pCurLeaf->uEbx;
5141
5142 pHlp->pfnPrintf(pHlp,
5143 "%36s %2d \tExtended: %d \tEffective: %d\n"
5144 "%36s %2d \tExtended: %d \tEffective: %d\n"
5145 "%36s %d\n"
5146 "%36s %d (%s)\n"
5147 "%36s %#04x\n"
5148 "%36s %d\n"
5149 "%36s %d\n"
5150 "%36s %#04x\n"
5151 ,
5152 "Family:", (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, RTX86GetCpuFamily(uEAX),
5153 "Model:", (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, RTX86GetCpuModel(uEAX, fIntel),
5154 "Stepping:", RTX86GetCpuStepping(uEAX),
5155 "Type:", (uEAX >> 12) & 3, s_apszTypes[(uEAX >> 12) & 3],
5156 "APIC ID:", (uEBX >> 24) & 0xff,
5157 "Logical CPUs:",(uEBX >> 16) & 0xff,
5158 "CLFLUSH Size:",(uEBX >> 8) & 0xff,
5159 "Brand ID:", (uEBX >> 0) & 0xff);
5160 if (fVerbose)
5161 {
5162 CPUMCPUID Host = {0};
5163#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5164 ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5165#endif
5166 pHlp->pfnPrintf(pHlp, "Features\n");
5167 pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
5168 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf1EdxSubFields, 56);
5169 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf1EcxSubFields, 56);
5170 }
5171 else
5172 {
5173 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf1EdxSubFields, "Features EDX:", 36);
5174 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf1EcxSubFields, "Features ECX:", 36);
5175 }
5176}
5177
5178
5179/**
5180 * Produces a detailed summary of standard leaf 0x00000007.
5181 *
5182 * @param pHlp The info helper functions.
5183 * @param paLeaves The CPUID leaves array.
5184 * @param cLeaves The number of leaves in the array.
5185 * @param pCurLeaf The first 0x00000007 leaf.
5186 * @param fVerbose Whether to be very verbose or not.
5187 */
5188static void cpumR3CpuIdInfoStdLeaf7Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
5189 PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose)
5190{
5191 Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 7);
5192 pHlp->pfnPrintf(pHlp, "Structured Extended Feature Flags Enumeration (leaf 7):\n");
5193 for (;;)
5194 {
5195 CPUMCPUID Host = {0};
5196#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5197 ASMCpuIdExSlow(pCurLeaf->uLeaf, 0, pCurLeaf->uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5198#endif
5199
5200 switch (pCurLeaf->uSubLeaf)
5201 {
5202 case 0:
5203 if (fVerbose)
5204 {
5205 pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
5206 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub0EbxSubFields, 56);
5207 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub0EcxSubFields, 56);
5208 if (pCurLeaf->uEdx || Host.uEdx)
5209 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub0EdxSubFields, 56);
5210 }
5211 else
5212 {
5213 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aLeaf7Sub0EbxSubFields, "Ext Features EBX:", 36);
5214 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf7Sub0EcxSubFields, "Ext Features ECX:", 36);
5215 if (pCurLeaf->uEdx)
5216 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf7Sub0EdxSubFields, "Ext Features EDX:", 36);
5217 }
5218 break;
5219
5220 default:
5221 if (pCurLeaf->uEdx || pCurLeaf->uEcx || pCurLeaf->uEbx)
5222 pHlp->pfnPrintf(pHlp, "Unknown extended feature sub-leaf #%u: EAX=%#x EBX=%#x ECX=%#x EDX=%#x\n",
5223 pCurLeaf->uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx);
5224 break;
5225
5226 }
5227
5228 /* advance. */
5229 pCurLeaf++;
5230 if ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
5231 || pCurLeaf->uLeaf != 0x7)
5232 break;
5233 }
5234}
5235
5236
5237/**
5238 * Produces a detailed summary of standard leaf 0x0000000d.
5239 *
5240 * @param pHlp The info helper functions.
5241 * @param paLeaves The CPUID leaves array.
5242 * @param cLeaves The number of leaves in the array.
5243 * @param pCurLeaf The first 0x0000000d leaf.
5244 * @param fVerbose Whether to be very verbose or not.
5245 */
5246static void cpumR3CpuIdInfoStdLeaf13Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
5247 PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose)
5248{
5249 RT_NOREF_PV(fVerbose);
5250 Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 13);
5251 pHlp->pfnPrintf(pHlp, "Processor Extended State Enumeration (leaf 0xd):\n");
5252 for (uint32_t uSubLeaf = 0; uSubLeaf < 64; uSubLeaf++)
5253 {
5254 CPUMCPUID Host = {0};
5255#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5256 ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5257#endif
5258
5259 switch (uSubLeaf)
5260 {
5261 case 0:
5262 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
5263 pHlp->pfnPrintf(pHlp, "%42s %#x/%#x\n", "XSAVE area cur/max size by XCR0, guest:",
5264 pCurLeaf->uEbx, pCurLeaf->uEcx);
5265 pHlp->pfnPrintf(pHlp, "%42s %#x/%#x\n", "XSAVE area cur/max size by XCR0, host:", Host.uEbx, Host.uEcx);
5266
5267 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
5268 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(pCurLeaf->uEax, pCurLeaf->uEdx), g_aXSaveStateBits,
5269 "Valid XCR0 bits, guest:", 42);
5270 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(Host.uEax, Host.uEdx), g_aXSaveStateBits,
5271 "Valid XCR0 bits, host:", 42);
5272 break;
5273
5274 case 1:
5275 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
5276 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEax, g_aLeaf13Sub1EaxSubFields, "XSAVE features, guest:", 42);
5277 cpumR3CpuIdInfoMnemonicListU32(pHlp, Host.uEax, g_aLeaf13Sub1EaxSubFields, "XSAVE features, host:", 42);
5278
5279 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
5280 pHlp->pfnPrintf(pHlp, "%42s %#x\n", "XSAVE area cur size XCR0|XSS, guest:", pCurLeaf->uEbx);
5281 pHlp->pfnPrintf(pHlp, "%42s %#x\n", "XSAVE area cur size XCR0|XSS, host:", Host.uEbx);
5282
5283 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
5284 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(pCurLeaf->uEcx, pCurLeaf->uEdx), g_aXSaveStateBits,
5285 " Valid IA32_XSS bits, guest:", 42);
5286 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(Host.uEcx, Host.uEdx), g_aXSaveStateBits,
5287 " Valid IA32_XSS bits, host:", 42);
5288 break;
5289
5290 default:
5291 if ( pCurLeaf
5292 && pCurLeaf->uSubLeaf == uSubLeaf
5293 && (pCurLeaf->uEax || pCurLeaf->uEbx || pCurLeaf->uEcx || pCurLeaf->uEdx) )
5294 {
5295 pHlp->pfnPrintf(pHlp, " State #%u, guest: off=%#06x, cb=%#06x %s", uSubLeaf, pCurLeaf->uEbx,
5296 pCurLeaf->uEax, pCurLeaf->uEcx & RT_BIT_32(0) ? "XCR0-bit" : "IA32_XSS-bit");
5297 if (pCurLeaf->uEcx & ~RT_BIT_32(0))
5298 pHlp->pfnPrintf(pHlp, " ECX[reserved]=%#x\n", pCurLeaf->uEcx & ~RT_BIT_32(0));
5299 if (pCurLeaf->uEdx)
5300 pHlp->pfnPrintf(pHlp, " EDX[reserved]=%#x\n", pCurLeaf->uEdx);
5301 pHlp->pfnPrintf(pHlp, " --");
5302 cpumR3CpuIdInfoMnemonicListU64(pHlp, RT_BIT_64(uSubLeaf), g_aXSaveStateBits, NULL, 0);
5303 pHlp->pfnPrintf(pHlp, "\n");
5304 }
5305 if (Host.uEax || Host.uEbx || Host.uEcx || Host.uEdx)
5306 {
5307 pHlp->pfnPrintf(pHlp, " State #%u, host: off=%#06x, cb=%#06x %s", uSubLeaf, Host.uEbx,
5308 Host.uEax, Host.uEcx & RT_BIT_32(0) ? "XCR0-bit" : "IA32_XSS-bit");
5309 if (Host.uEcx & ~RT_BIT_32(0))
5310 pHlp->pfnPrintf(pHlp, " ECX[reserved]=%#x\n", Host.uEcx & ~RT_BIT_32(0));
5311 if (Host.uEdx)
5312 pHlp->pfnPrintf(pHlp, " EDX[reserved]=%#x\n", Host.uEdx);
5313 pHlp->pfnPrintf(pHlp, " --");
5314 cpumR3CpuIdInfoMnemonicListU64(pHlp, RT_BIT_64(uSubLeaf), g_aXSaveStateBits, NULL, 0);
5315 pHlp->pfnPrintf(pHlp, "\n");
5316 }
5317 break;
5318
5319 }
5320
5321 /* advance. */
5322 if (pCurLeaf)
5323 {
5324 while ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
5325 && pCurLeaf->uSubLeaf <= uSubLeaf
5326 && pCurLeaf->uLeaf == UINT32_C(0x0000000d))
5327 pCurLeaf++;
5328 if ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
5329 || pCurLeaf->uLeaf != UINT32_C(0x0000000d))
5330 pCurLeaf = NULL;
5331 }
5332 }
5333}
5334
5335
5336static PCCPUMCPUIDLEAF cpumR3CpuIdInfoRawRange(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
5337 PCCPUMCPUIDLEAF pCurLeaf, uint32_t uUpToLeaf, const char *pszTitle)
5338{
5339 if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
5340 && pCurLeaf->uLeaf <= uUpToLeaf)
5341 {
5342 pHlp->pfnPrintf(pHlp,
5343 " %s\n"
5344 " Leaf/sub-leaf eax ebx ecx edx\n", pszTitle);
5345 while ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
5346 && pCurLeaf->uLeaf <= uUpToLeaf)
5347 {
5348 CPUMCPUID Host = {0};
5349#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5350 ASMCpuIdExSlow(pCurLeaf->uLeaf, 0, pCurLeaf->uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5351#endif
5352 pHlp->pfnPrintf(pHlp,
5353 "Gst: %08x/%04x %08x %08x %08x %08x\n"
5354 "Hst: %08x %08x %08x %08x\n",
5355 pCurLeaf->uLeaf, pCurLeaf->uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
5356 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
5357 pCurLeaf++;
5358 }
5359 }
5360
5361 return pCurLeaf;
5362}
5363
5364
5365/**
5366 * Display the guest CpuId leaves.
5367 *
5368 * @param pVM The cross context VM structure.
5369 * @param pHlp The info helper functions.
5370 * @param pszArgs "terse", "default" or "verbose".
5371 */
5372DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
5373{
5374 /*
5375 * Parse the argument.
5376 */
5377 unsigned iVerbosity = 1;
5378 if (pszArgs)
5379 {
5380 pszArgs = RTStrStripL(pszArgs);
5381 if (!strcmp(pszArgs, "terse"))
5382 iVerbosity--;
5383 else if (!strcmp(pszArgs, "verbose"))
5384 iVerbosity++;
5385 }
5386
5387 uint32_t uLeaf;
5388 CPUMCPUID Host = {0};
5389 uint32_t cLeaves = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
5390 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
5391 PCCPUMCPUIDLEAF pCurLeaf;
5392 PCCPUMCPUIDLEAF pNextLeaf;
5393 bool const fIntel = RTX86IsIntelCpu(pVM->cpum.s.aGuestCpuIdPatmStd[0].uEbx,
5394 pVM->cpum.s.aGuestCpuIdPatmStd[0].uEcx,
5395 pVM->cpum.s.aGuestCpuIdPatmStd[0].uEdx);
5396
5397 /*
5398 * Standard leaves. Custom raw dump here due to ECX sub-leaves host handling.
5399 */
5400#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5401 uint32_t cHstMax = ASMCpuId_EAX(0);
5402#else
5403 uint32_t cHstMax = 0;
5404#endif
5405 uint32_t cGstMax = paLeaves[0].uLeaf == 0 ? paLeaves[0].uEax : 0;
5406 uint32_t cMax = RT_MAX(cGstMax, cHstMax);
5407 pHlp->pfnPrintf(pHlp,
5408 " Raw Standard CPUID Leaves\n"
5409 " Leaf/sub-leaf eax ebx ecx edx\n");
5410 for (uLeaf = 0, pCurLeaf = paLeaves; uLeaf <= cMax; uLeaf++)
5411 {
5412 uint32_t cMaxSubLeaves = 1;
5413 if (uLeaf == 4 || uLeaf == 7 || uLeaf == 0xb)
5414 cMaxSubLeaves = 16;
5415 else if (uLeaf == 0xd)
5416 cMaxSubLeaves = 128;
5417
5418 for (uint32_t uSubLeaf = 0; uSubLeaf < cMaxSubLeaves; uSubLeaf++)
5419 {
5420#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5421 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5422#endif
5423 if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
5424 && pCurLeaf->uLeaf == uLeaf
5425 && pCurLeaf->uSubLeaf == uSubLeaf)
5426 {
5427 pHlp->pfnPrintf(pHlp,
5428 "Gst: %08x/%04x %08x %08x %08x %08x\n"
5429 "Hst: %08x %08x %08x %08x\n",
5430 uLeaf, uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
5431 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
5432 pCurLeaf++;
5433 }
5434 else if ( uLeaf != 0xd
5435 || uSubLeaf <= 1
5436 || Host.uEbx != 0 )
5437 pHlp->pfnPrintf(pHlp,
5438 "Hst: %08x/%04x %08x %08x %08x %08x\n",
5439 uLeaf, uSubLeaf, Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
5440
5441 /* Done? */
5442 if ( ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
5443 || pCurLeaf->uLeaf != uLeaf)
5444 && ( (uLeaf == 0x4 && ((Host.uEax & 0x000f) == 0 || (Host.uEax & 0x000f) >= 8))
5445 || (uLeaf == 0x7 && Host.uEax == 0)
5446 || (uLeaf == 0xb && ((Host.uEcx & 0xff00) == 0 || (Host.uEcx & 0xff00) >= 8))
5447 || (uLeaf == 0xb && (Host.uEcx & 0xff) != uSubLeaf)
5448 || (uLeaf == 0xd && uSubLeaf >= 128)
5449 )
5450 )
5451 break;
5452 }
5453 }
5454 pNextLeaf = pCurLeaf;
5455
5456 /*
5457 * If verbose, decode it.
5458 */
5459 if (iVerbosity && paLeaves[0].uLeaf == 0)
5460 pHlp->pfnPrintf(pHlp,
5461 "%36s %.04s%.04s%.04s\n"
5462 "%36s 0x00000000-%#010x\n"
5463 ,
5464 "Name:", &paLeaves[0].uEbx, &paLeaves[0].uEdx, &paLeaves[0].uEcx,
5465 "Supports:", paLeaves[0].uEax);
5466
5467 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x00000001), 0)) != NULL)
5468 cpumR3CpuIdInfoStdLeaf1Details(pHlp, pCurLeaf, iVerbosity > 1, fIntel);
5469
5470 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x00000007), 0)) != NULL)
5471 cpumR3CpuIdInfoStdLeaf7Details(pHlp, paLeaves, cLeaves, pCurLeaf, iVerbosity > 1);
5472
5473 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), 0)) != NULL)
5474 cpumR3CpuIdInfoStdLeaf13Details(pHlp, paLeaves, cLeaves, pCurLeaf, iVerbosity > 1);
5475
5476 pCurLeaf = pNextLeaf;
5477
5478 /*
5479 * Hypervisor leaves.
5480 *
5481 * Unlike most of the other leaves reported, the guest hypervisor leaves
5482 * aren't a subset of the host CPUID bits.
5483 */
5484 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0x3fffffff), "Unknown CPUID Leaves");
5485
5486#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5487 ASMCpuIdExSlow(UINT32_C(0x40000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5488#endif
5489 cHstMax = Host.uEax >= UINT32_C(0x40000001) && Host.uEax <= UINT32_C(0x40000fff) ? Host.uEax : 0;
5490 cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0x40000000)
5491 ? RT_MIN(pCurLeaf->uEax, UINT32_C(0x40000fff)) : 0;
5492 cMax = RT_MAX(cHstMax, cGstMax);
5493 if (cMax >= UINT32_C(0x40000000))
5494 {
5495 pNextLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, cMax, "Raw Hypervisor CPUID Leaves");
5496
5497 /** @todo dump these in more detail. */
5498
5499 pCurLeaf = pNextLeaf;
5500 }
5501
5502
5503 /*
5504 * Extended. Custom raw dump here due to ECX sub-leaves host handling.
5505 * Implemented according to the AMD specs.
5506 */
5507 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0x7fffffff), "Unknown CPUID Leaves");
5508
5509#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5510 ASMCpuIdExSlow(UINT32_C(0x80000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5511#endif
5512 cHstMax = RTX86IsValidExtRange(Host.uEax) ? RT_MIN(Host.uEax, UINT32_C(0x80000fff)) : 0;
5513 cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0x80000000)
5514 ? RT_MIN(pCurLeaf->uEax, UINT32_C(0x80000fff)) : 0;
5515 cMax = RT_MAX(cHstMax, cGstMax);
5516 if (cMax >= UINT32_C(0x80000000))
5517 {
5518
5519 pHlp->pfnPrintf(pHlp,
5520 " Raw Extended CPUID Leaves\n"
5521 " Leaf/sub-leaf eax ebx ecx edx\n");
5522 PCCPUMCPUIDLEAF pExtLeaf = pCurLeaf;
5523 for (uLeaf = UINT32_C(0x80000000); uLeaf <= cMax; uLeaf++)
5524 {
5525 uint32_t cMaxSubLeaves = 1;
5526 if (uLeaf == UINT32_C(0x8000001d))
5527 cMaxSubLeaves = 16;
5528
5529 for (uint32_t uSubLeaf = 0; uSubLeaf < cMaxSubLeaves; uSubLeaf++)
5530 {
5531#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5532 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5533#endif
5534 if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
5535 && pCurLeaf->uLeaf == uLeaf
5536 && pCurLeaf->uSubLeaf == uSubLeaf)
5537 {
5538 pHlp->pfnPrintf(pHlp,
5539 "Gst: %08x/%04x %08x %08x %08x %08x\n"
5540 "Hst: %08x %08x %08x %08x\n",
5541 uLeaf, uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
5542 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
5543 pCurLeaf++;
5544 }
5545 else if ( uLeaf != 0xd
5546 || uSubLeaf <= 1
5547 || Host.uEbx != 0 )
5548 pHlp->pfnPrintf(pHlp,
5549 "Hst: %08x/%04x %08x %08x %08x %08x\n",
5550 uLeaf, uSubLeaf, Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
5551
5552 /* Done? */
5553 if ( ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
5554 || pCurLeaf->uLeaf != uLeaf)
5555 && (uLeaf == UINT32_C(0x8000001d) && ((Host.uEax & 0x000f) == 0 || (Host.uEax & 0x000f) >= 8)) )
5556 break;
5557 }
5558 }
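
#if 0 /* Editor's illustrative sketch (not compiled): the AMD rule the loop
       * above leans on for leaf 0x8000001d (cache topology) -- sub-leaves are
       * enumerated until the cache type field in EAX[4:0] reads 0, meaning
       * "no more caches".  The helper name is hypothetical. */
static uint32_t cpumR3ExampleCountCacheLevels(void)
{
    uint32_t cLevels = 0;
    for (uint32_t uSubLeaf = 0; uSubLeaf < 16; uSubLeaf++)
    {
        uint32_t uEax, uEbx, uEcx, uEdx;
        ASMCpuIdExSlow(UINT32_C(0x8000001d), 0, uSubLeaf, 0, &uEax, &uEbx, &uEcx, &uEdx);
        if ((uEax & 0x1f) == 0)     /* Cache type 0 = null descriptor, stop. */
            break;
        cLevels++;
    }
    return cLevels;
}
#endif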
5559 pNextLeaf = pCurLeaf;
5560
5561 /*
5562 * Understandable output
5563 */
5564 if (iVerbosity)
5565 pHlp->pfnPrintf(pHlp,
5566 "Ext Name: %.4s%.4s%.4s\n"
5567 "Ext Supports: 0x80000000-%#010x\n",
5568 &pExtLeaf->uEbx, &pExtLeaf->uEdx, &pExtLeaf->uEcx, pExtLeaf->uEax);
5569
5570 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000001), 0);
5571 if (iVerbosity && pCurLeaf)
5572 {
5573 uint32_t uEAX = pCurLeaf->uEax;
5574 pHlp->pfnPrintf(pHlp,
5575 "Family: %d \tExtended: %d \tEffective: %d\n"
5576 "Model: %d \tExtended: %d \tEffective: %d\n"
5577 "Stepping: %d\n"
5578 "Brand ID: %#05x\n",
5579 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, RTX86GetCpuFamily(uEAX),
5580 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, RTX86GetCpuModel(uEAX, fIntel),
5581 RTX86GetCpuStepping(uEAX),
5582 pCurLeaf->uEbx & 0xfff);
5583
5584 if (iVerbosity == 1)
5585 {
5586 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aExtLeaf1EdxSubFields, "Ext Features EDX:", 34);
5587                cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aExtLeaf1EcxSubFields, "Ext Features ECX:", 34);
5588 }
5589 else
5590 {
5591#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5592 ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5593#endif
5594 pHlp->pfnPrintf(pHlp, "Ext Features\n");
5595 pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
5596 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf1EdxSubFields, 56);
5597 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aExtLeaf1EcxSubFields, 56);
5598 if (Host.uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
5599 {
5600 pHlp->pfnPrintf(pHlp, "SVM Feature Identification (leaf A):\n");
5601#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5602 ASMCpuIdExSlow(0x8000000a, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5603#endif
5604 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x8000000a), 0);
5605 uint32_t const uGstEdx = pCurLeaf ? pCurLeaf->uEdx : 0;
5606 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, uGstEdx, Host.uEdx, g_aExtLeafAEdxSubFields, 56);
5607 }
5608 }
5609 }
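
#if 0 /* Editor's illustrative sketch (not compiled): the conventional CPUID
       * rule behind the "effective" family printed above (essentially what
       * RTX86GetCpuFamily computes) -- the extended family field only
       * contributes when the base family is 0xf. */
static uint32_t cpumR3ExampleEffectiveFamily(uint32_t uEax)
{
    uint32_t const uBaseFamily = (uEax >> 8) & 0xf;
    if (uBaseFamily == 0xf)
        return uBaseFamily + ((uEax >> 20) & 0x7f); /* Extended family extends 0xf. */
    return uBaseFamily;                             /* Extended family ignored. */
}
#endif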
5610
5611 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000002), 0)) != NULL)
5612 {
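            /* Leaves 0x80000002..0x80000004 each return 16 of the 48 processor
               brand string characters in EAX, EBX, ECX and EDX. */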
5613 char szString[4*4*3+1] = {0};
5614 uint32_t *pu32 = (uint32_t *)szString;
5615 *pu32++ = pCurLeaf->uEax;
5616 *pu32++ = pCurLeaf->uEbx;
5617 *pu32++ = pCurLeaf->uEcx;
5618 *pu32++ = pCurLeaf->uEdx;
5619 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000003), 0);
5620 if (pCurLeaf)
5621 {
5622 *pu32++ = pCurLeaf->uEax;
5623 *pu32++ = pCurLeaf->uEbx;
5624 *pu32++ = pCurLeaf->uEcx;
5625 *pu32++ = pCurLeaf->uEdx;
5626 }
5627 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000004), 0);
5628 if (pCurLeaf)
5629 {
5630 *pu32++ = pCurLeaf->uEax;
5631 *pu32++ = pCurLeaf->uEbx;
5632 *pu32++ = pCurLeaf->uEcx;
5633 *pu32++ = pCurLeaf->uEdx;
5634 }
5635 pHlp->pfnPrintf(pHlp, "Full Name: \"%s\"\n", szString);
5636 }
5637
5638 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000005), 0)) != NULL)
5639 {
5640 uint32_t uEAX = pCurLeaf->uEax;
5641 uint32_t uEBX = pCurLeaf->uEbx;
5642 uint32_t uECX = pCurLeaf->uEcx;
5643 uint32_t uEDX = pCurLeaf->uEdx;
5644 char sz1[32];
5645 char sz2[32];
5646
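        /* AMD leaf 0x80000005 layout: EAX = 2/4MB TLB, EBX = 4KB TLB,
           ECX = L1 data cache, EDX = L1 instruction cache. */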
5647 pHlp->pfnPrintf(pHlp,
5648 "TLB 2/4M Instr/Uni: %s %3d entries\n"
5649 "TLB 2/4M Data: %s %3d entries\n",
5650 getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
5651 getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
5652 pHlp->pfnPrintf(pHlp,
5653 "TLB 4K Instr/Uni: %s %3d entries\n"
5654 "TLB 4K Data: %s %3d entries\n",
5655 getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
5656 getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
5657 pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
5658 "L1 Instr Cache Lines Per Tag: %d\n"
5659 "L1 Instr Cache Associativity: %s\n"
5660 "L1 Instr Cache Size: %d KB\n",
5661 (uEDX >> 0) & 0xff,
5662 (uEDX >> 8) & 0xff,
5663 getCacheAss((uEDX >> 16) & 0xff, sz1),
5664 (uEDX >> 24) & 0xff);
5665 pHlp->pfnPrintf(pHlp,
5666 "L1 Data Cache Line Size: %d bytes\n"
5667 "L1 Data Cache Lines Per Tag: %d\n"
5668 "L1 Data Cache Associativity: %s\n"
5669 "L1 Data Cache Size: %d KB\n",
5670 (uECX >> 0) & 0xff,
5671 (uECX >> 8) & 0xff,
5672 getCacheAss((uECX >> 16) & 0xff, sz1),
5673 (uECX >> 24) & 0xff);
5674 }
5675
5676 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000006), 0)) != NULL)
5677 {
5678 uint32_t uEAX = pCurLeaf->uEax;
5679 uint32_t uEBX = pCurLeaf->uEbx;
5680 uint32_t uEDX = pCurLeaf->uEdx;
5681
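        /* Leaf 0x80000006 packs L2 TLB entry counts into 12-bit fields and uses
           a 4-bit associativity code (0 = disabled, 1 = direct mapped, ...,
           0xf = fully associative) decoded by getL2CacheAss. */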
5682 pHlp->pfnPrintf(pHlp,
5683 "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
5684 "L2 TLB 2/4M Data: %s %4d entries\n",
5685 getL2CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
5686 getL2CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
5687 pHlp->pfnPrintf(pHlp,
5688 "L2 TLB 4K Instr/Uni: %s %4d entries\n"
5689 "L2 TLB 4K Data: %s %4d entries\n",
5690 getL2CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
5691 getL2CacheAss((uEBX >> 28) & 0xf), (uEBX >> 16) & 0xfff);
5692 pHlp->pfnPrintf(pHlp,
5693 "L2 Cache Line Size: %d bytes\n"
5694 "L2 Cache Lines Per Tag: %d\n"
5695 "L2 Cache Associativity: %s\n"
5696 "L2 Cache Size: %d KB\n",
5697 (uEDX >> 0) & 0xff,
5698 (uEDX >> 8) & 0xf,
5699 getL2CacheAss((uEDX >> 12) & 0xf),
5700 (uEDX >> 16) & 0xffff);
5701 }
5702
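    /* Leaf 0x80000007: Advanced Power Management information; EDX carries the
       APM feature flags (e.g. the invariant TSC bit). */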
5703 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000007), 0)) != NULL)
5704 {
5705#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5706 ASMCpuIdExSlow(UINT32_C(0x80000007), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5707#endif
5708 if (pCurLeaf->uEdx || (Host.uEdx && iVerbosity))
5709 {
5710 if (iVerbosity < 1)
5711 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aExtLeaf7EdxSubFields, "APM Features EDX:", 34);
5712 else
5713 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf7EdxSubFields, 56);
5714 }
5715 }
5716
5717 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000008), 0);
5718 if (pCurLeaf != NULL)
5719 {
5720#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5721 ASMCpuIdExSlow(UINT32_C(0x80000008), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5722#endif
5723 if (pCurLeaf->uEbx || (Host.uEbx && iVerbosity))
5724 {
5725 if (iVerbosity < 1)
5726 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aExtLeaf8EbxSubFields, "Ext Features ext IDs EBX:", 34);
5727 else
5728 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aExtLeaf8EbxSubFields, 56);
5729 }
5730
5731 if (iVerbosity)
5732 {
5733 uint32_t uEAX = pCurLeaf->uEax;
5734 uint32_t uECX = pCurLeaf->uEcx;
5735
5736                /** @todo 0x80000008:EAX[23:16] is only defined for AMD; we'll get 0 on Intel.  On
5737                 *        AMD, if we get 0 the guest physical address width should be taken from
5738                 *        0x80000008:EAX[7:0] instead.  The guest physical address width is relevant
5739                 *        for guests using nested paging. */
5740 pHlp->pfnPrintf(pHlp,
5741 "Physical Address Width: %d bits\n"
5742 "Virtual Address Width: %d bits\n"
5743 "Guest Physical Address Width: %d bits\n",
5744 (uEAX >> 0) & 0xff,
5745 (uEAX >> 8) & 0xff,
5746 (uEAX >> 16) & 0xff);
5747
5748 /** @todo 0x80000008:ECX is reserved on Intel (we'll get incorrect physical core
5749 * count here). */
5750 pHlp->pfnPrintf(pHlp,
5751 "Physical Core Count: %d\n",
5752 ((uECX >> 0) & 0xff) + 1);
5753 }
5754 }
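
#if 0 /* Editor's illustrative sketch (not compiled): the fallback suggested by
       * the @todo above -- on AMD, 0x80000008:EAX[23:16] gives the guest
       * physical address width when non-zero, otherwise the host physical
       * width in EAX[7:0] applies.  The helper name is hypothetical. */
static uint8_t cpumR3ExampleGuestPhysAddrWidth(uint32_t uLeaf8Eax)
{
    uint8_t const cGuestBits = (uLeaf8Eax >> 16) & 0xff;
    if (cGuestBits)
        return cGuestBits;              /* AMD: explicit guest width. */
    return (uint8_t)(uLeaf8Eax & 0xff); /* Fall back to the host physical width. */
}
#endif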
5755
5756 pCurLeaf = pNextLeaf;
5757 }
5758
5759
5760
5761 /*
5762 * Centaur.
5763 */
5764 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0xbfffffff), "Unknown CPUID Leaves");
5765
5766#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5767 ASMCpuIdExSlow(UINT32_C(0xc0000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5768#endif
5769 cHstMax = Host.uEax >= UINT32_C(0xc0000001) && Host.uEax <= UINT32_C(0xc0000fff)
5770 ? RT_MIN(Host.uEax, UINT32_C(0xc0000fff)) : 0;
5771 cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0xc0000000)
5772 ? RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000fff)) : 0;
5773 cMax = RT_MAX(cHstMax, cGstMax);
5774 if (cMax >= UINT32_C(0xc0000000))
5775 {
5776 pNextLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, cMax, "Raw Centaur CPUID Leaves");
5777
5778 /*
5779 * Understandable output
5780 */
5781 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0xc0000000), 0)) != NULL)
5782 pHlp->pfnPrintf(pHlp,
5783 "Centaur Supports: 0xc0000000-%#010x\n",
5784 pCurLeaf->uEax);
5785
5786 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0xc0000001), 0)) != NULL)
5787 {
5788#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5789 ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5790#endif
5791 uint32_t uEdxGst = pCurLeaf->uEdx;
5792 uint32_t uEdxHst = Host.uEdx;
5793
5794 if (iVerbosity == 1)
5795 {
5796 pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
5797 if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
5798 if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
5799 if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
5800 if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
5801 if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
5802 if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
5803 if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
5804 if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
5805 /* possibly indicating MM/HE and MM/HE-E on older chips... */
5806 if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
5807 if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
5808 if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
5809 if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
5810 if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
5811 if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
5812 for (unsigned iBit = 14; iBit < 32; iBit++)
5813 if (uEdxGst & RT_BIT(iBit))
5814 pHlp->pfnPrintf(pHlp, " %d", iBit);
5815 pHlp->pfnPrintf(pHlp, "\n");
5816 }
5817 else
5818 {
5819 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
5820 pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
5821 pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
5822 pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
5823 pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
5824 pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
5825 pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
5826 pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
5827 pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
5828 /* possibly indicating MM/HE and MM/HE-E on older chips... */
5829 pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
5830             pHlp->pfnPrintf(pHlp, "ACE2-E - ACE2 enabled                   = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
5831 pHlp->pfnPrintf(pHlp, "PHE - Padlock Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
5832 pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
5833 pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
5834 pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
5835 pHlp->pfnPrintf(pHlp, "14 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
5836 pHlp->pfnPrintf(pHlp, "15 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
5837 pHlp->pfnPrintf(pHlp, "Parallax = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
5838 pHlp->pfnPrintf(pHlp, "Parallax enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
5839 pHlp->pfnPrintf(pHlp, "Overstress = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
5840 pHlp->pfnPrintf(pHlp, "Overstress enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
5841 pHlp->pfnPrintf(pHlp, "TM3 - Temperature Monitoring 3 = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
5842 pHlp->pfnPrintf(pHlp, "TM3-E - TM3 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
5843 pHlp->pfnPrintf(pHlp, "RNG2 - Random Number Generator 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
5844 pHlp->pfnPrintf(pHlp, "RNG2-E - RNG2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
5845 pHlp->pfnPrintf(pHlp, "24 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
5846 pHlp->pfnPrintf(pHlp, "PHE2 - Padlock Hash Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
5847 pHlp->pfnPrintf(pHlp, "PHE2-E - PHE2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
5848 for (unsigned iBit = 27; iBit < 32; iBit++)
5849 if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
5850 pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", iBit, !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
5851 pHlp->pfnPrintf(pHlp, "\n");
5852 }
5853 }
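
#if 0 /* Editor's illustrative sketch (not compiled): the VIA PadLock pairing
       * convention visible in the table above -- each feature occupies two
       * bits, a "present" bit followed by an "enabled" bit (RNG = 2/3,
       * ACE = 6/7, PHE = 10/11, PMM = 12/13), and a feature is usable only
       * when both are set.  The helper name is hypothetical. */
static bool cpumR3ExamplePadLockUsable(uint32_t uEdx, unsigned iPresentBit)
{
    return (uEdx & RT_BIT(iPresentBit))         /* Feature present. */
        && (uEdx & RT_BIT(iPresentBit + 1));    /* Feature enabled. */
}
#endif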
5854
5855 pCurLeaf = pNextLeaf;
5856 }
5857
5858 /*
5859 * The remainder.
5860 */
5861 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0xffffffff), "Unknown CPUID Leaves");
5862}
5863
5864#endif /* !IN_VBOX_CPU_REPORT */
5865