VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp@ 54561

Last change on this file since 54561 was 54561, checked in by vboxsync, 10 years ago

Moved all the CPUID related code from CPUM.cpp to CPUMR3CpuId.cpp

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 148.6 KB
Line 
1/* $Id: CPUMR3CpuId.cpp 54561 2015-02-27 16:59:02Z vboxsync $ */
2/** @file
3 * CPUM - CPU ID part.
4 */
5
6/*
7 * Copyright (C) 2013-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_CPUM
22#include <VBox/vmm/cpum.h>
23#include <VBox/vmm/hm.h>
24#include <VBox/vmm/ssm.h>
25#include "CPUMInternal.h"
26#include <VBox/vmm/vm.h>
27#include <VBox/vmm/mm.h>
28
29#include <VBox/err.h>
30#include <iprt/asm-amd64-x86.h>
31#include <iprt/ctype.h>
32#include <iprt/mem.h>
33#include <iprt/string.h>
34
35
36/*******************************************************************************
37* Global Variables *
38*******************************************************************************/
39/**
40 * The intel pentium family.
41 */
42static const CPUMMICROARCH g_aenmIntelFamily06[] =
43{
44 /* [ 0(0x00)] = */ kCpumMicroarch_Intel_P6, /* Pentium Pro A-step (says sandpile.org). */
45 /* [ 1(0x01)] = */ kCpumMicroarch_Intel_P6, /* Pentium Pro */
46 /* [ 2(0x02)] = */ kCpumMicroarch_Intel_Unknown,
47 /* [ 3(0x03)] = */ kCpumMicroarch_Intel_P6_II, /* PII Klamath */
48 /* [ 4(0x04)] = */ kCpumMicroarch_Intel_Unknown,
49 /* [ 5(0x05)] = */ kCpumMicroarch_Intel_P6_II, /* PII Deschutes */
50 /* [ 6(0x06)] = */ kCpumMicroarch_Intel_P6_II, /* Celeron Mendocino. */
51 /* [ 7(0x07)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Katmai. */
52 /* [ 8(0x08)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Coppermine (includes Celeron). */
53 /* [ 9(0x09)] = */ kCpumMicroarch_Intel_P6_M_Banias, /* Pentium/Celeron M Banias. */
54 /* [10(0x0a)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Xeon */
55 /* [11(0x0b)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Tualatin (includes Celeron). */
56 /* [12(0x0c)] = */ kCpumMicroarch_Intel_Unknown,
57 /* [13(0x0d)] = */ kCpumMicroarch_Intel_P6_M_Dothan, /* Pentium/Celeron M Dothan. */
58 /* [14(0x0e)] = */ kCpumMicroarch_Intel_Core_Yonah, /* Core Yonah (Enhanced Pentium M). */
59 /* [15(0x0f)] = */ kCpumMicroarch_Intel_Core2_Merom, /* Merom */
60 /* [16(0x10)] = */ kCpumMicroarch_Intel_Unknown,
61 /* [17(0x11)] = */ kCpumMicroarch_Intel_Unknown,
62 /* [18(0x12)] = */ kCpumMicroarch_Intel_Unknown,
63 /* [19(0x13)] = */ kCpumMicroarch_Intel_Unknown,
64 /* [20(0x14)] = */ kCpumMicroarch_Intel_Unknown,
65 /* [21(0x15)] = */ kCpumMicroarch_Intel_P6_M_Dothan, /* Tolapai - System-on-a-chip. */
66 /* [22(0x16)] = */ kCpumMicroarch_Intel_Core2_Merom,
67 /* [23(0x17)] = */ kCpumMicroarch_Intel_Core2_Penryn,
68 /* [24(0x18)] = */ kCpumMicroarch_Intel_Unknown,
69 /* [25(0x19)] = */ kCpumMicroarch_Intel_Unknown,
70 /* [26(0x1a)] = */ kCpumMicroarch_Intel_Core7_Nehalem,
71 /* [27(0x1b)] = */ kCpumMicroarch_Intel_Unknown,
72 /* [28(0x1c)] = */ kCpumMicroarch_Intel_Atom_Bonnell, /* Diamonville, Pineview, */
73 /* [29(0x1d)] = */ kCpumMicroarch_Intel_Core2_Penryn,
74 /* [30(0x1e)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Clarksfield, Lynnfield, Jasper Forest. */
75 /* [31(0x1f)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Only listed by sandpile.org. 2 cores ABD/HVD, whatever that means. */
76 /* [32(0x20)] = */ kCpumMicroarch_Intel_Unknown,
77 /* [33(0x21)] = */ kCpumMicroarch_Intel_Unknown,
78 /* [34(0x22)] = */ kCpumMicroarch_Intel_Unknown,
79 /* [35(0x23)] = */ kCpumMicroarch_Intel_Unknown,
80 /* [36(0x24)] = */ kCpumMicroarch_Intel_Unknown,
81 /* [37(0x25)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Arrandale, Clarksdale. */
82 /* [38(0x26)] = */ kCpumMicroarch_Intel_Atom_Lincroft,
83 /* [39(0x27)] = */ kCpumMicroarch_Intel_Atom_Saltwell,
84 /* [40(0x28)] = */ kCpumMicroarch_Intel_Unknown,
85 /* [41(0x29)] = */ kCpumMicroarch_Intel_Unknown,
86 /* [42(0x2a)] = */ kCpumMicroarch_Intel_Core7_SandyBridge,
87 /* [43(0x2b)] = */ kCpumMicroarch_Intel_Unknown,
88 /* [44(0x2c)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Gulftown, Westmere-EP. */
89 /* [45(0x2d)] = */ kCpumMicroarch_Intel_Core7_SandyBridge, /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP. */
90 /* [46(0x2e)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Beckton (Xeon). */
91 /* [47(0x2f)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Westmere-EX. */
92 /* [48(0x30)] = */ kCpumMicroarch_Intel_Unknown,
93 /* [49(0x31)] = */ kCpumMicroarch_Intel_Unknown,
94 /* [50(0x32)] = */ kCpumMicroarch_Intel_Unknown,
95 /* [51(0x33)] = */ kCpumMicroarch_Intel_Unknown,
96 /* [52(0x34)] = */ kCpumMicroarch_Intel_Unknown,
97 /* [53(0x35)] = */ kCpumMicroarch_Intel_Atom_Saltwell, /* ?? */
98 /* [54(0x36)] = */ kCpumMicroarch_Intel_Atom_Saltwell, /* Cedarview, ++ */
99 /* [55(0x37)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
100 /* [56(0x38)] = */ kCpumMicroarch_Intel_Unknown,
101 /* [57(0x39)] = */ kCpumMicroarch_Intel_Unknown,
102 /* [58(0x3a)] = */ kCpumMicroarch_Intel_Core7_IvyBridge,
103 /* [59(0x3b)] = */ kCpumMicroarch_Intel_Unknown,
104 /* [60(0x3c)] = */ kCpumMicroarch_Intel_Core7_Haswell,
105 /* [61(0x3d)] = */ kCpumMicroarch_Intel_Core7_Broadwell,
106 /* [62(0x3e)] = */ kCpumMicroarch_Intel_Core7_IvyBridge,
107 /* [63(0x3f)] = */ kCpumMicroarch_Intel_Core7_Haswell,
108 /* [64(0x40)] = */ kCpumMicroarch_Intel_Unknown,
109 /* [65(0x41)] = */ kCpumMicroarch_Intel_Unknown,
110 /* [66(0x42)] = */ kCpumMicroarch_Intel_Unknown,
111 /* [67(0x43)] = */ kCpumMicroarch_Intel_Unknown,
112 /* [68(0x44)] = */ kCpumMicroarch_Intel_Unknown,
113 /* [69(0x45)] = */ kCpumMicroarch_Intel_Core7_Haswell,
114 /* [70(0x46)] = */ kCpumMicroarch_Intel_Core7_Haswell,
115 /* [71(0x47)] = */ kCpumMicroarch_Intel_Unknown,
116 /* [72(0x48)] = */ kCpumMicroarch_Intel_Unknown,
117 /* [73(0x49)] = */ kCpumMicroarch_Intel_Unknown,
118 /* [74(0x4a)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
119 /* [75(0x4b)] = */ kCpumMicroarch_Intel_Unknown,
120 /* [76(0x4c)] = */ kCpumMicroarch_Intel_Unknown,
121 /* [77(0x4d)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
122 /* [78(0x4e)] = */ kCpumMicroarch_Intel_Unknown,
123 /* [79(0x4f)] = */ kCpumMicroarch_Intel_Unknown,
124};
125
126
127
128/**
129 * Figures out the (sub-)micro architecture given a bit of CPUID info.
130 *
131 * @returns Micro architecture.
132 * @param enmVendor The CPU vendor .
133 * @param bFamily The CPU family.
134 * @param bModel The CPU model.
135 * @param bStepping The CPU stepping.
136 */
137VMMR3DECL(CPUMMICROARCH) CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
138 uint8_t bModel, uint8_t bStepping)
139{
140 if (enmVendor == CPUMCPUVENDOR_AMD)
141 {
142 switch (bFamily)
143 {
144 case 0x02: return kCpumMicroarch_AMD_Am286; /* Not really kosher... */
145 case 0x03: return kCpumMicroarch_AMD_Am386;
146 case 0x23: return kCpumMicroarch_AMD_Am386; /* SX*/
147 case 0x04: return bModel < 14 ? kCpumMicroarch_AMD_Am486 : kCpumMicroarch_AMD_Am486Enh;
148 case 0x05: return bModel < 6 ? kCpumMicroarch_AMD_K5 : kCpumMicroarch_AMD_K6; /* Genode LX is 0x0a, lump it with K6. */
149 case 0x06:
150 switch (bModel)
151 {
152 case 0: kCpumMicroarch_AMD_K7_Palomino;
153 case 1: kCpumMicroarch_AMD_K7_Palomino;
154 case 2: kCpumMicroarch_AMD_K7_Palomino;
155 case 3: kCpumMicroarch_AMD_K7_Spitfire;
156 case 4: kCpumMicroarch_AMD_K7_Thunderbird;
157 case 6: kCpumMicroarch_AMD_K7_Palomino;
158 case 7: kCpumMicroarch_AMD_K7_Morgan;
159 case 8: kCpumMicroarch_AMD_K7_Thoroughbred;
160 case 10: kCpumMicroarch_AMD_K7_Barton; /* Thorton too. */
161 }
162 return kCpumMicroarch_AMD_K7_Unknown;
163 case 0x0f:
164 /*
165 * This family is a friggin mess. Trying my best to make some
166 * sense out of it. Too much happened in the 0x0f family to
167 * lump it all together as K8 (130nm->90nm->65nm, AMD-V, ++).
168 *
169 * Emperical CPUID.01h.EAX evidence from revision guides, wikipedia,
170 * cpu-world.com, and other places:
171 * - 130nm:
172 * - ClawHammer: F7A/SH-CG, F5A/-CG, F4A/-CG, F50/-B0, F48/-C0, F58/-C0,
173 * - SledgeHammer: F50/SH-B0, F48/-C0, F58/-C0, F4A/-CG, F5A/-CG, F7A/-CG, F51/-B3
174 * - Newcastle: FC0/DH-CG (errum #180: FE0/DH-CG), FF0/DH-CG
175 * - Dublin: FC0/-CG, FF0/-CG, F82/CH-CG, F4A/-CG, F48/SH-C0,
176 * - Odessa: FC0/DH-CG (errum #180: FE0/DH-CG)
177 * - Paris: FF0/DH-CG, FC0/DH-CG (errum #180: FE0/DH-CG),
178 * - 90nm:
179 * - Winchester: 10FF0/DH-D0, 20FF0/DH-E3.
180 * - Oakville: 10FC0/DH-D0.
181 * - Georgetown: 10FC0/DH-D0.
182 * - Sonora: 10FC0/DH-D0.
183 * - Venus: 20F71/SH-E4
184 * - Troy: 20F51/SH-E4
185 * - Athens: 20F51/SH-E4
186 * - San Diego: 20F71/SH-E4.
187 * - Lancaster: 20F42/SH-E5
188 * - Newark: 20F42/SH-E5.
189 * - Albany: 20FC2/DH-E6.
190 * - Roma: 20FC2/DH-E6.
191 * - Venice: 20FF0/DH-E3, 20FC2/DH-E6, 20FF2/DH-E6.
192 * - Palermo: 10FC0/DH-D0, 20FF0/DH-E3, 20FC0/DH-E3, 20FC2/DH-E6, 20FF2/DH-E6
193 * - 90nm introducing Dual core:
194 * - Denmark: 20F30/JH-E1, 20F32/JH-E6
195 * - Italy: 20F10/JH-E1, 20F12/JH-E6
196 * - Egypt: 20F10/JH-E1, 20F12/JH-E6
197 * - Toledo: 20F32/JH-E6, 30F72/DH-E6 (single code variant).
198 * - Manchester: 20FB1/BH-E4, 30FF2/BH-E4.
199 * - 90nm 2nd gen opteron ++, AMD-V introduced (might be missing in some cheaper models):
200 * - Santa Ana: 40F32/JH-F2, /-F3
201 * - Santa Rosa: 40F12/JH-F2, 40F13/JH-F3
202 * - Windsor: 40F32/JH-F2, 40F33/JH-F3, C0F13/JH-F3, 40FB2/BH-F2, ??20FB1/BH-E4??.
203 * - Manila: 50FF2/DH-F2, 40FF2/DH-F2
204 * - Orleans: 40FF2/DH-F2, 50FF2/DH-F2, 50FF3/DH-F3.
205 * - Keene: 40FC2/DH-F2.
206 * - Richmond: 40FC2/DH-F2
207 * - Taylor: 40F82/BH-F2
208 * - Trinidad: 40F82/BH-F2
209 *
210 * - 65nm:
211 * - Brisbane: 60FB1/BH-G1, 60FB2/BH-G2.
212 * - Tyler: 60F81/BH-G1, 60F82/BH-G2.
213 * - Sparta: 70FF1/DH-G1, 70FF2/DH-G2.
214 * - Lima: 70FF1/DH-G1, 70FF2/DH-G2.
215 * - Sherman: /-G1, 70FC2/DH-G2.
216 * - Huron: 70FF2/DH-G2.
217 */
218 if (bModel < 0x10)
219 return kCpumMicroarch_AMD_K8_130nm;
220 if (bModel >= 0x60 && bModel < 0x80)
221 return kCpumMicroarch_AMD_K8_65nm;
222 if (bModel >= 0x40)
223 return kCpumMicroarch_AMD_K8_90nm_AMDV;
224 switch (bModel)
225 {
226 case 0x21:
227 case 0x23:
228 case 0x2b:
229 case 0x2f:
230 case 0x37:
231 case 0x3f:
232 return kCpumMicroarch_AMD_K8_90nm_DualCore;
233 }
234 return kCpumMicroarch_AMD_K8_90nm;
235 case 0x10:
236 return kCpumMicroarch_AMD_K10;
237 case 0x11:
238 return kCpumMicroarch_AMD_K10_Lion;
239 case 0x12:
240 return kCpumMicroarch_AMD_K10_Llano;
241 case 0x14:
242 return kCpumMicroarch_AMD_Bobcat;
243 case 0x15:
244 switch (bModel)
245 {
246 case 0x00: return kCpumMicroarch_AMD_15h_Bulldozer; /* Any? prerelease? */
247 case 0x01: return kCpumMicroarch_AMD_15h_Bulldozer; /* Opteron 4200, FX-81xx. */
248 case 0x02: return kCpumMicroarch_AMD_15h_Piledriver; /* Opteron 4300, FX-83xx. */
249 case 0x10: return kCpumMicroarch_AMD_15h_Piledriver; /* A10-5800K for e.g. */
250 case 0x11: /* ?? */
251 case 0x12: /* ?? */
252 case 0x13: return kCpumMicroarch_AMD_15h_Piledriver; /* A10-6800K for e.g. */
253 }
254 return kCpumMicroarch_AMD_15h_Unknown;
255 case 0x16:
256 return kCpumMicroarch_AMD_Jaguar;
257
258 }
259 return kCpumMicroarch_AMD_Unknown;
260 }
261
262 if (enmVendor == CPUMCPUVENDOR_INTEL)
263 {
264 switch (bFamily)
265 {
266 case 3:
267 return kCpumMicroarch_Intel_80386;
268 case 4:
269 return kCpumMicroarch_Intel_80486;
270 case 5:
271 return kCpumMicroarch_Intel_P5;
272 case 6:
273 if (bModel < RT_ELEMENTS(g_aenmIntelFamily06))
274 return g_aenmIntelFamily06[bModel];
275 return kCpumMicroarch_Intel_Atom_Unknown;
276 case 15:
277 switch (bModel)
278 {
279 case 0: return kCpumMicroarch_Intel_NB_Willamette;
280 case 1: return kCpumMicroarch_Intel_NB_Willamette;
281 case 2: return kCpumMicroarch_Intel_NB_Northwood;
282 case 3: return kCpumMicroarch_Intel_NB_Prescott;
283 case 4: return kCpumMicroarch_Intel_NB_Prescott2M; /* ?? */
284 case 5: return kCpumMicroarch_Intel_NB_Unknown; /*??*/
285 case 6: return kCpumMicroarch_Intel_NB_CedarMill;
286 case 7: return kCpumMicroarch_Intel_NB_Gallatin;
287 default: return kCpumMicroarch_Intel_NB_Unknown;
288 }
289 break;
290 /* The following are not kosher but kind of follow intuitively from 6, 5 & 4. */
291 case 1:
292 return kCpumMicroarch_Intel_8086;
293 case 2:
294 return kCpumMicroarch_Intel_80286;
295 }
296 return kCpumMicroarch_Intel_Unknown;
297 }
298
299 if (enmVendor == CPUMCPUVENDOR_VIA)
300 {
301 switch (bFamily)
302 {
303 case 5:
304 switch (bModel)
305 {
306 case 1: return kCpumMicroarch_Centaur_C6;
307 case 4: return kCpumMicroarch_Centaur_C6;
308 case 8: return kCpumMicroarch_Centaur_C2;
309 case 9: return kCpumMicroarch_Centaur_C3;
310 }
311 break;
312
313 case 6:
314 switch (bModel)
315 {
316 case 5: return kCpumMicroarch_VIA_C3_M2;
317 case 6: return kCpumMicroarch_VIA_C3_C5A;
318 case 7: return bStepping < 8 ? kCpumMicroarch_VIA_C3_C5B : kCpumMicroarch_VIA_C3_C5C;
319 case 8: return kCpumMicroarch_VIA_C3_C5N;
320 case 9: return bStepping < 8 ? kCpumMicroarch_VIA_C3_C5XL : kCpumMicroarch_VIA_C3_C5P;
321 case 10: return kCpumMicroarch_VIA_C7_C5J;
322 case 15: return kCpumMicroarch_VIA_Isaiah;
323 }
324 break;
325 }
326 return kCpumMicroarch_VIA_Unknown;
327 }
328
329 if (enmVendor == CPUMCPUVENDOR_CYRIX)
330 {
331 switch (bFamily)
332 {
333 case 4:
334 switch (bModel)
335 {
336 case 9: return kCpumMicroarch_Cyrix_5x86;
337 }
338 break;
339
340 case 5:
341 switch (bModel)
342 {
343 case 2: return kCpumMicroarch_Cyrix_M1;
344 case 4: return kCpumMicroarch_Cyrix_MediaGX;
345 case 5: return kCpumMicroarch_Cyrix_MediaGXm;
346 }
347 break;
348
349 case 6:
350 switch (bModel)
351 {
352 case 0: return kCpumMicroarch_Cyrix_M2;
353 }
354 break;
355
356 }
357 return kCpumMicroarch_Cyrix_Unknown;
358 }
359
360 return kCpumMicroarch_Unknown;
361}
362
363
364/**
365 * Translates a microarchitecture enum value to the corresponding string
366 * constant.
367 *
368 * @returns Read-only string constant (omits "kCpumMicroarch_" prefix). Returns
369 * NULL if the value is invalid.
370 *
371 * @param enmMicroarch The enum value to convert.
372 */
373VMMR3DECL(const char *) CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch)
374{
375 switch (enmMicroarch)
376 {
377#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("kCpumMicroarch_") - 1)
378 CASE_RET_STR(kCpumMicroarch_Intel_8086);
379 CASE_RET_STR(kCpumMicroarch_Intel_80186);
380 CASE_RET_STR(kCpumMicroarch_Intel_80286);
381 CASE_RET_STR(kCpumMicroarch_Intel_80386);
382 CASE_RET_STR(kCpumMicroarch_Intel_80486);
383 CASE_RET_STR(kCpumMicroarch_Intel_P5);
384
385 CASE_RET_STR(kCpumMicroarch_Intel_P6);
386 CASE_RET_STR(kCpumMicroarch_Intel_P6_II);
387 CASE_RET_STR(kCpumMicroarch_Intel_P6_III);
388
389 CASE_RET_STR(kCpumMicroarch_Intel_P6_M_Banias);
390 CASE_RET_STR(kCpumMicroarch_Intel_P6_M_Dothan);
391 CASE_RET_STR(kCpumMicroarch_Intel_Core_Yonah);
392
393 CASE_RET_STR(kCpumMicroarch_Intel_Core2_Merom);
394 CASE_RET_STR(kCpumMicroarch_Intel_Core2_Penryn);
395
396 CASE_RET_STR(kCpumMicroarch_Intel_Core7_Nehalem);
397 CASE_RET_STR(kCpumMicroarch_Intel_Core7_Westmere);
398 CASE_RET_STR(kCpumMicroarch_Intel_Core7_SandyBridge);
399 CASE_RET_STR(kCpumMicroarch_Intel_Core7_IvyBridge);
400 CASE_RET_STR(kCpumMicroarch_Intel_Core7_Haswell);
401 CASE_RET_STR(kCpumMicroarch_Intel_Core7_Broadwell);
402 CASE_RET_STR(kCpumMicroarch_Intel_Core7_Skylake);
403 CASE_RET_STR(kCpumMicroarch_Intel_Core7_Cannonlake);
404
405 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Bonnell);
406 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Lincroft);
407 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Saltwell);
408 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Silvermont);
409 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Airmount);
410 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Goldmont);
411 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Unknown);
412
413 CASE_RET_STR(kCpumMicroarch_Intel_NB_Willamette);
414 CASE_RET_STR(kCpumMicroarch_Intel_NB_Northwood);
415 CASE_RET_STR(kCpumMicroarch_Intel_NB_Prescott);
416 CASE_RET_STR(kCpumMicroarch_Intel_NB_Prescott2M);
417 CASE_RET_STR(kCpumMicroarch_Intel_NB_CedarMill);
418 CASE_RET_STR(kCpumMicroarch_Intel_NB_Gallatin);
419 CASE_RET_STR(kCpumMicroarch_Intel_NB_Unknown);
420
421 CASE_RET_STR(kCpumMicroarch_Intel_Unknown);
422
423 CASE_RET_STR(kCpumMicroarch_AMD_Am286);
424 CASE_RET_STR(kCpumMicroarch_AMD_Am386);
425 CASE_RET_STR(kCpumMicroarch_AMD_Am486);
426 CASE_RET_STR(kCpumMicroarch_AMD_Am486Enh);
427 CASE_RET_STR(kCpumMicroarch_AMD_K5);
428 CASE_RET_STR(kCpumMicroarch_AMD_K6);
429
430 CASE_RET_STR(kCpumMicroarch_AMD_K7_Palomino);
431 CASE_RET_STR(kCpumMicroarch_AMD_K7_Spitfire);
432 CASE_RET_STR(kCpumMicroarch_AMD_K7_Thunderbird);
433 CASE_RET_STR(kCpumMicroarch_AMD_K7_Morgan);
434 CASE_RET_STR(kCpumMicroarch_AMD_K7_Thoroughbred);
435 CASE_RET_STR(kCpumMicroarch_AMD_K7_Barton);
436 CASE_RET_STR(kCpumMicroarch_AMD_K7_Unknown);
437
438 CASE_RET_STR(kCpumMicroarch_AMD_K8_130nm);
439 CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm);
440 CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm_DualCore);
441 CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm_AMDV);
442 CASE_RET_STR(kCpumMicroarch_AMD_K8_65nm);
443
444 CASE_RET_STR(kCpumMicroarch_AMD_K10);
445 CASE_RET_STR(kCpumMicroarch_AMD_K10_Lion);
446 CASE_RET_STR(kCpumMicroarch_AMD_K10_Llano);
447 CASE_RET_STR(kCpumMicroarch_AMD_Bobcat);
448 CASE_RET_STR(kCpumMicroarch_AMD_Jaguar);
449
450 CASE_RET_STR(kCpumMicroarch_AMD_15h_Bulldozer);
451 CASE_RET_STR(kCpumMicroarch_AMD_15h_Piledriver);
452 CASE_RET_STR(kCpumMicroarch_AMD_15h_Steamroller);
453 CASE_RET_STR(kCpumMicroarch_AMD_15h_Excavator);
454 CASE_RET_STR(kCpumMicroarch_AMD_15h_Unknown);
455
456 CASE_RET_STR(kCpumMicroarch_AMD_16h_First);
457
458 CASE_RET_STR(kCpumMicroarch_AMD_Unknown);
459
460 CASE_RET_STR(kCpumMicroarch_Centaur_C6);
461 CASE_RET_STR(kCpumMicroarch_Centaur_C2);
462 CASE_RET_STR(kCpumMicroarch_Centaur_C3);
463 CASE_RET_STR(kCpumMicroarch_VIA_C3_M2);
464 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5A);
465 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5B);
466 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5C);
467 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5N);
468 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5XL);
469 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5P);
470 CASE_RET_STR(kCpumMicroarch_VIA_C7_C5J);
471 CASE_RET_STR(kCpumMicroarch_VIA_Isaiah);
472 CASE_RET_STR(kCpumMicroarch_VIA_Unknown);
473
474 CASE_RET_STR(kCpumMicroarch_Cyrix_5x86);
475 CASE_RET_STR(kCpumMicroarch_Cyrix_M1);
476 CASE_RET_STR(kCpumMicroarch_Cyrix_MediaGX);
477 CASE_RET_STR(kCpumMicroarch_Cyrix_MediaGXm);
478 CASE_RET_STR(kCpumMicroarch_Cyrix_M2);
479 CASE_RET_STR(kCpumMicroarch_Cyrix_Unknown);
480
481 CASE_RET_STR(kCpumMicroarch_Unknown);
482
483#undef CASE_RET_STR
484 case kCpumMicroarch_Invalid:
485 case kCpumMicroarch_Intel_End:
486 case kCpumMicroarch_Intel_Core7_End:
487 case kCpumMicroarch_Intel_Atom_End:
488 case kCpumMicroarch_Intel_P6_Core_Atom_End:
489 case kCpumMicroarch_Intel_NB_End:
490 case kCpumMicroarch_AMD_K7_End:
491 case kCpumMicroarch_AMD_K8_End:
492 case kCpumMicroarch_AMD_15h_End:
493 case kCpumMicroarch_AMD_16h_End:
494 case kCpumMicroarch_AMD_End:
495 case kCpumMicroarch_VIA_End:
496 case kCpumMicroarch_Cyrix_End:
497 case kCpumMicroarch_32BitHack:
498 break;
499 /* no default! */
500 }
501
502 return NULL;
503}
504
505
506
507/**
508 * Gets a matching leaf in the CPUID leaf array.
509 *
510 * @returns Pointer to the matching leaf, or NULL if not found.
511 * @param paLeaves The CPUID leaves to search. This is sorted.
512 * @param cLeaves The number of leaves in the array.
513 * @param uLeaf The leaf to locate.
514 * @param uSubLeaf The subleaf to locate. Pass 0 if no subleaves.
515 */
516PCPUMCPUIDLEAF cpumR3CpuIdGetLeaf(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf)
517{
518 /* Lazy bird does linear lookup here since this is only used for the
519 occational CPUID overrides. */
520 for (uint32_t i = 0; i < cLeaves; i++)
521 if ( paLeaves[i].uLeaf == uLeaf
522 && paLeaves[i].uSubLeaf == (uSubLeaf & paLeaves[i].fSubLeafMask))
523 return &paLeaves[i];
524 return NULL;
525}
526
527
528/**
529 * Gets a matching leaf in the CPUID leaf array, converted to a CPUMCPUID.
530 *
531 * @returns true if found, false it not.
532 * @param paLeaves The CPUID leaves to search. This is sorted.
533 * @param cLeaves The number of leaves in the array.
534 * @param uLeaf The leaf to locate.
535 * @param uSubLeaf The subleaf to locate. Pass 0 if no subleaves.
536 * @param pLegacy The legacy output leaf.
537 */
538bool cpumR3CpuIdGetLeafLegacy(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf, PCPUMCPUID pLegacy)
539{
540 PCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, uLeaf, uSubLeaf);
541 if (pLeaf)
542 {
543 pLegacy->eax = pLeaf->uEax;
544 pLegacy->ebx = pLeaf->uEbx;
545 pLegacy->ecx = pLeaf->uEcx;
546 pLegacy->edx = pLeaf->uEdx;
547 return true;
548 }
549 return false;
550}
551
552
553/**
554 * Ensures that the CPUID leaf array can hold one more leaf.
555 *
556 * @returns Pointer to the CPUID leaf array (*ppaLeaves) on success. NULL on
557 * failure.
558 * @param pVM Pointer to the VM, used as the heap selector. Passing
559 * NULL uses the host-context heap, otherwise the VM's
560 * hyper heap is used.
561 * @param ppaLeaves Pointer to the variable holding the array pointer
562 * (input/output).
563 * @param cLeaves The current array size.
564 *
565 * @remarks This function will automatically update the R0 and RC pointers when
566 * using the hyper heap, which means @a ppaLeaves and @a cLeaves must
567 * be the corresponding VM's CPUID arrays (which is asserted).
568 */
569static PCPUMCPUIDLEAF cpumR3CpuIdEnsureSpace(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t cLeaves)
570{
571 uint32_t cAllocated;
572 if (!pVM)
573 cAllocated = RT_ALIGN(cLeaves, 16);
574 else
575 {
576 /*
577 * We're using the hyper heap now, but when the arrays were copied over to it from
578 * the host-context heap, we only copy the exact size and not the ensured size.
579 * See @bugref{7270}.
580 */
581 cAllocated = cLeaves;
582 }
583
584 if (cLeaves + 1 > cAllocated)
585 {
586 void *pvNew;
587#ifndef IN_VBOX_CPU_REPORT
588 if (pVM)
589 {
590 Assert(ppaLeaves == &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
591 Assert(cLeaves == pVM->cpum.s.GuestInfo.cCpuIdLeaves);
592
593 size_t cb = cAllocated * sizeof(**ppaLeaves);
594 size_t cbNew = (cAllocated + 16) * sizeof(**ppaLeaves);
595 int rc = MMR3HyperRealloc(pVM, *ppaLeaves, cb, 32, MM_TAG_CPUM_CPUID, cbNew, &pvNew);
596 if (RT_FAILURE(rc))
597 {
598 *ppaLeaves = NULL;
599 pVM->cpum.s.GuestInfo.paCpuIdLeavesR0 = NIL_RTR0PTR;
600 pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR;
601 LogRel(("CPUM: cpumR3CpuIdEnsureSpace: MMR3HyperRealloc failed. rc=%Rrc\n", rc));
602 return NULL;
603 }
604 *ppaLeaves = (PCPUMCPUIDLEAF)pvNew;
605 }
606 else
607#endif
608 {
609 pvNew = RTMemRealloc(*ppaLeaves, (cAllocated + 16) * sizeof(**ppaLeaves));
610 if (!pvNew)
611 {
612 RTMemFree(*ppaLeaves);
613 *ppaLeaves = NULL;
614 return NULL;
615 }
616 *ppaLeaves = (PCPUMCPUIDLEAF)pvNew;
617 }
618 }
619
620#ifndef IN_VBOX_CPU_REPORT
621 /* Update the R0 and RC pointers. */
622 if (pVM)
623 {
624 Assert(ppaLeaves == &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
625 pVM->cpum.s.GuestInfo.paCpuIdLeavesR0 = MMHyperR3ToR0(pVM, *ppaLeaves);
626 pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, *ppaLeaves);
627 }
628#endif
629
630 return *ppaLeaves;
631}
632
633
634/**
635 * Append a CPUID leaf or sub-leaf.
636 *
637 * ASSUMES linear insertion order, so we'll won't need to do any searching or
638 * replace anything. Use cpumR3CpuIdInsert() for those cases.
639 *
640 * @returns VINF_SUCCESS or VERR_NO_MEMORY. On error, *ppaLeaves is freed, so
641 * the caller need do no more work.
642 * @param ppaLeaves Pointer to the the pointer to the array of sorted
643 * CPUID leaves and sub-leaves.
644 * @param pcLeaves Where we keep the leaf count for *ppaLeaves.
645 * @param uLeaf The leaf we're adding.
646 * @param uSubLeaf The sub-leaf number.
647 * @param fSubLeafMask The sub-leaf mask.
648 * @param uEax The EAX value.
649 * @param uEbx The EBX value.
650 * @param uEcx The ECX value.
651 * @param uEdx The EDX value.
652 * @param fFlags The flags.
653 */
654static int cpumR3CollectCpuIdInfoAddOne(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves,
655 uint32_t uLeaf, uint32_t uSubLeaf, uint32_t fSubLeafMask,
656 uint32_t uEax, uint32_t uEbx, uint32_t uEcx, uint32_t uEdx, uint32_t fFlags)
657{
658 if (!cpumR3CpuIdEnsureSpace(NULL /* pVM */, ppaLeaves, *pcLeaves))
659 return VERR_NO_MEMORY;
660
661 PCPUMCPUIDLEAF pNew = &(*ppaLeaves)[*pcLeaves];
662 Assert( *pcLeaves == 0
663 || pNew[-1].uLeaf < uLeaf
664 || (pNew[-1].uLeaf == uLeaf && pNew[-1].uSubLeaf < uSubLeaf) );
665
666 pNew->uLeaf = uLeaf;
667 pNew->uSubLeaf = uSubLeaf;
668 pNew->fSubLeafMask = fSubLeafMask;
669 pNew->uEax = uEax;
670 pNew->uEbx = uEbx;
671 pNew->uEcx = uEcx;
672 pNew->uEdx = uEdx;
673 pNew->fFlags = fFlags;
674
675 *pcLeaves += 1;
676 return VINF_SUCCESS;
677}
678
679
680/**
681 * Inserts a CPU ID leaf, replacing any existing ones.
682 *
683 * When inserting a simple leaf where we already got a series of subleaves with
684 * the same leaf number (eax), the simple leaf will replace the whole series.
685 *
686 * When pVM is NULL, this ASSUMES that the leaves array is still on the normal
687 * host-context heap and has only been allocated/reallocated by the
688 * cpumR3CpuIdEnsureSpace function.
689 *
690 * @returns VBox status code.
691 * @param pVM Pointer to the VM, used as the heap selector.
692 * Passing NULL uses the host-context heap, otherwise
693 * the VM's hyper heap is used.
694 * @param ppaLeaves Pointer to the the pointer to the array of sorted
695 * CPUID leaves and sub-leaves. Must be NULL if using
696 * the hyper heap.
697 * @param pcLeaves Where we keep the leaf count for *ppaLeaves. Must be
698 * NULL if using the hyper heap.
699 * @param pNewLeaf Pointer to the data of the new leaf we're about to
700 * insert.
701 */
702int cpumR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf)
703{
704 /*
705 * Validate input parameters if we are using the hyper heap and use the VM's CPUID arrays.
706 */
707 if (pVM)
708 {
709 AssertReturn(!ppaLeaves, VERR_INVALID_PARAMETER);
710 AssertReturn(!pcLeaves, VERR_INVALID_PARAMETER);
711
712 ppaLeaves = &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
713 pcLeaves = &pVM->cpum.s.GuestInfo.cCpuIdLeaves;
714 }
715
716 PCPUMCPUIDLEAF paLeaves = *ppaLeaves;
717 uint32_t cLeaves = *pcLeaves;
718
719 /*
720 * Validate the new leaf a little.
721 */
722 AssertReturn(!(pNewLeaf->fFlags & ~CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED), VERR_INVALID_FLAGS);
723 AssertReturn(pNewLeaf->fSubLeafMask != 0 || pNewLeaf->uSubLeaf == 0, VERR_INVALID_PARAMETER);
724 AssertReturn(RT_IS_POWER_OF_TWO(pNewLeaf->fSubLeafMask + 1), VERR_INVALID_PARAMETER);
725 AssertReturn((pNewLeaf->fSubLeafMask & pNewLeaf->uSubLeaf) == pNewLeaf->uSubLeaf, VERR_INVALID_PARAMETER);
726
727 /*
728 * Find insertion point. The lazy bird uses the same excuse as in
729 * cpumR3CpuIdGetLeaf().
730 */
731 uint32_t i = 0;
732 while ( i < cLeaves
733 && paLeaves[i].uLeaf < pNewLeaf->uLeaf)
734 i++;
735 if ( i < cLeaves
736 && paLeaves[i].uLeaf == pNewLeaf->uLeaf)
737 {
738 if (paLeaves[i].fSubLeafMask != pNewLeaf->fSubLeafMask)
739 {
740 /*
741 * The subleaf mask differs, replace all existing leaves with the
742 * same leaf number.
743 */
744 uint32_t c = 1;
745 while ( i + c < cLeaves
746 && paLeaves[i + c].uSubLeaf == pNewLeaf->uLeaf)
747 c++;
748 if (c > 1 && i + c < cLeaves)
749 {
750 memmove(&paLeaves[i + c], &paLeaves[i + 1], (cLeaves - i - c) * sizeof(paLeaves[0]));
751 *pcLeaves = cLeaves -= c - 1;
752 }
753
754 paLeaves[i] = *pNewLeaf;
755 return VINF_SUCCESS;
756 }
757
758 /* Find subleaf insertion point. */
759 while ( i < cLeaves
760 && paLeaves[i].uSubLeaf < pNewLeaf->uSubLeaf)
761 i++;
762
763 /*
764 * If we've got an exactly matching leaf, replace it.
765 */
766 if ( paLeaves[i].uLeaf == pNewLeaf->uLeaf
767 && paLeaves[i].uSubLeaf == pNewLeaf->uSubLeaf)
768 {
769 paLeaves[i] = *pNewLeaf;
770 return VINF_SUCCESS;
771 }
772 }
773
774 /*
775 * Adding a new leaf at 'i'.
776 */
777 paLeaves = cpumR3CpuIdEnsureSpace(pVM, ppaLeaves, cLeaves);
778 if (!paLeaves)
779 return VERR_NO_MEMORY;
780
781 if (i < cLeaves)
782 memmove(&paLeaves[i + 1], &paLeaves[i], (cLeaves - i) * sizeof(paLeaves[0]));
783 *pcLeaves += 1;
784 paLeaves[i] = *pNewLeaf;
785 return VINF_SUCCESS;
786}
787
788
789/**
790 * Removes a range of CPUID leaves.
791 *
792 * This will not reallocate the array.
793 *
794 * @param paLeaves The array of sorted CPUID leaves and sub-leaves.
795 * @param pcLeaves Where we keep the leaf count for @a paLeaves.
796 * @param uFirst The first leaf.
797 * @param uLast The last leaf.
798 */
799void cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast)
800{
801 uint32_t cLeaves = *pcLeaves;
802
803 Assert(uFirst <= uLast);
804
805 /*
806 * Find the first one.
807 */
808 uint32_t iFirst = 0;
809 while ( iFirst < cLeaves
810 && paLeaves[iFirst].uLeaf < uFirst)
811 iFirst++;
812
813 /*
814 * Find the end (last + 1).
815 */
816 uint32_t iEnd = iFirst;
817 while ( iEnd < cLeaves
818 && paLeaves[iEnd].uLeaf <= uLast)
819 iEnd++;
820
821 /*
822 * Adjust the array if anything needs removing.
823 */
824 if (iFirst < iEnd)
825 {
826 if (iEnd < cLeaves)
827 memmove(&paLeaves[iFirst], &paLeaves[iEnd], (cLeaves - iEnd) * sizeof(paLeaves[0]));
828 *pcLeaves = cLeaves -= (iEnd - iFirst);
829 }
830}
831
832
833
834/**
835 * Checks if ECX make a difference when reading a given CPUID leaf.
836 *
837 * @returns @c true if it does, @c false if it doesn't.
838 * @param uLeaf The leaf we're reading.
839 * @param pcSubLeaves Number of sub-leaves accessible via ECX.
840 * @param pfFinalEcxUnchanged Whether ECX is passed thru when going beyond the
841 * final sub-leaf.
842 */
843static bool cpumR3IsEcxRelevantForCpuIdLeaf(uint32_t uLeaf, uint32_t *pcSubLeaves, bool *pfFinalEcxUnchanged)
844{
845 *pfFinalEcxUnchanged = false;
846
847 uint32_t auCur[4];
848 uint32_t auPrev[4];
849 ASMCpuIdExSlow(uLeaf, 0, 0, 0, &auPrev[0], &auPrev[1], &auPrev[2], &auPrev[3]);
850
851 /* Look for sub-leaves. */
852 uint32_t uSubLeaf = 1;
853 for (;;)
854 {
855 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
856 if (memcmp(auCur, auPrev, sizeof(auCur)))
857 break;
858
859 /* Advance / give up. */
860 uSubLeaf++;
861 if (uSubLeaf >= 64)
862 {
863 *pcSubLeaves = 1;
864 return false;
865 }
866 }
867
868 /* Count sub-leaves. */
869 uint32_t cRepeats = 0;
870 uSubLeaf = 0;
871 for (;;)
872 {
873 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
874
875 /* Figuring out when to stop isn't entirely straight forward as we need
876 to cover undocumented behavior up to a point and implementation shortcuts. */
877
878 /* 1. Look for zero values. */
879 if ( auCur[0] == 0
880 && auCur[1] == 0
881 && (auCur[2] == 0 || auCur[2] == uSubLeaf)
882 && (auCur[3] == 0 || uLeaf == 0xb /* edx is fixed */) )
883 break;
884
885 /* 2. Look for more than 4 repeating value sets. */
886 if ( auCur[0] == auPrev[0]
887 && auCur[1] == auPrev[1]
888 && ( auCur[2] == auPrev[2]
889 || ( auCur[2] == uSubLeaf
890 && auPrev[2] == uSubLeaf - 1) )
891 && auCur[3] == auPrev[3])
892 {
893 cRepeats++;
894 if (cRepeats > 4)
895 break;
896 }
897 else
898 cRepeats = 0;
899
900 /* 3. Leaf 0xb level type 0 check. */
901 if ( uLeaf == 0xb
902 && (auCur[3] & 0xff00) == 0
903 && (auPrev[3] & 0xff00) == 0)
904 break;
905
906 /* 99. Give up. */
907 if (uSubLeaf >= 128)
908 {
909#ifndef IN_VBOX_CPU_REPORT
910 /* Ok, limit it according to the documentation if possible just to
911 avoid annoying users with these detection issues. */
912 uint32_t cDocLimit = UINT32_MAX;
913 if (uLeaf == 0x4)
914 cDocLimit = 4;
915 else if (uLeaf == 0x7)
916 cDocLimit = 1;
917 else if (uLeaf == 0xf)
918 cDocLimit = 2;
919 if (cDocLimit != UINT32_MAX)
920 {
921 *pfFinalEcxUnchanged = auCur[2] == uSubLeaf;
922 *pcSubLeaves = cDocLimit + 3;
923 return true;
924 }
925#endif
926 *pcSubLeaves = UINT32_MAX;
927 return true;
928 }
929
930 /* Advance. */
931 uSubLeaf++;
932 memcpy(auPrev, auCur, sizeof(auCur));
933 }
934
935 /* Standard exit. */
936 *pfFinalEcxUnchanged = auCur[2] == uSubLeaf;
937 *pcSubLeaves = uSubLeaf + 1 - cRepeats;
938 return true;
939}
940
941
942/**
943 * Gets a CPU ID leaf.
944 *
945 * @returns VBox status code.
946 * @param pVM Pointer to the VM.
947 * @param pLeaf Where to store the found leaf.
948 * @param uLeaf The leaf to locate.
949 * @param uSubLeaf The subleaf to locate. Pass 0 if no subleaves.
950 */
951VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf)
952{
953 PCPUMCPUIDLEAF pcLeaf = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
954 uLeaf, uSubLeaf);
955 if (pcLeaf)
956 {
957 memcpy(pLeaf, pcLeaf, sizeof(*pLeaf));
958 return VINF_SUCCESS;
959 }
960
961 return VERR_NOT_FOUND;
962}
963
964
965/**
966 * Inserts a CPU ID leaf, replacing any existing ones.
967 *
968 * @returns VBox status code.
969 * @param pVM Pointer to the VM.
970 * @param pNewLeaf Pointer to the leaf being inserted.
971 */
972VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf)
973{
974 /*
975 * Validate parameters.
976 */
977 AssertReturn(pVM, VERR_INVALID_PARAMETER);
978 AssertReturn(pNewLeaf, VERR_INVALID_PARAMETER);
979
980 /*
981 * Disallow replacing CPU ID leaves that this API currently cannot manage.
982 * These leaves have dependencies on saved-states, see PATMCpuidReplacement().
983 * If you want to modify these leaves, use CPUMSetGuestCpuIdFeature().
984 */
985 if ( pNewLeaf->uLeaf == UINT32_C(0x00000000) /* Standard */
986 || pNewLeaf->uLeaf == UINT32_C(0x80000000) /* Extended */
987 || pNewLeaf->uLeaf == UINT32_C(0xc0000000)) /* Centaur */
988 {
989 return VERR_NOT_SUPPORTED;
990 }
991
992 return cpumR3CpuIdInsert(pVM, NULL /* ppaLeaves */, NULL /* pcLeaves */, pNewLeaf);
993}
994
995/**
996 * Collects CPUID leaves and sub-leaves, returning a sorted array of them.
997 *
998 * @returns VBox status code.
999 * @param ppaLeaves Where to return the array pointer on success.
1000 * Use RTMemFree to release.
1001 * @param pcLeaves Where to return the size of the array on
1002 * success.
1003 */
1004VMMR3DECL(int) CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
1005{
1006 *ppaLeaves = NULL;
1007 *pcLeaves = 0;
1008
1009 /*
1010 * Try out various candidates. This must be sorted!
1011 */
1012 static struct { uint32_t uMsr; bool fSpecial; } const s_aCandidates[] =
1013 {
1014 { UINT32_C(0x00000000), false },
1015 { UINT32_C(0x10000000), false },
1016 { UINT32_C(0x20000000), false },
1017 { UINT32_C(0x30000000), false },
1018 { UINT32_C(0x40000000), false },
1019 { UINT32_C(0x50000000), false },
1020 { UINT32_C(0x60000000), false },
1021 { UINT32_C(0x70000000), false },
1022 { UINT32_C(0x80000000), false },
1023 { UINT32_C(0x80860000), false },
1024 { UINT32_C(0x8ffffffe), true },
1025 { UINT32_C(0x8fffffff), true },
1026 { UINT32_C(0x90000000), false },
1027 { UINT32_C(0xa0000000), false },
1028 { UINT32_C(0xb0000000), false },
1029 { UINT32_C(0xc0000000), false },
1030 { UINT32_C(0xd0000000), false },
1031 { UINT32_C(0xe0000000), false },
1032 { UINT32_C(0xf0000000), false },
1033 };
1034
1035 for (uint32_t iOuter = 0; iOuter < RT_ELEMENTS(s_aCandidates); iOuter++)
1036 {
1037 uint32_t uLeaf = s_aCandidates[iOuter].uMsr;
1038 uint32_t uEax, uEbx, uEcx, uEdx;
1039 ASMCpuIdExSlow(uLeaf, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
1040
1041 /*
1042 * Does EAX look like a typical leaf count value?
1043 */
1044 if ( uEax > uLeaf
1045 && uEax - uLeaf < UINT32_C(0xff)) /* Adjust 0xff limit when exceeded by real HW. */
1046 {
1047 /* Yes, dump them. */
1048 uint32_t cLeaves = uEax - uLeaf + 1;
1049 while (cLeaves-- > 0)
1050 {
1051 /* Check three times here to reduce the chance of CPU migration
1052 resulting in false positives with things like the APIC ID. */
1053 uint32_t cSubLeaves;
1054 bool fFinalEcxUnchanged;
1055 if ( cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged)
1056 && cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged)
1057 && cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged))
1058 {
1059 if (cSubLeaves > 16)
1060 {
1061 /* This shouldn't happen. But in case it does, file all
1062 relevant details in the release log. */
1063 LogRel(("CPUM: VERR_CPUM_TOO_MANY_CPUID_SUBLEAVES! uLeaf=%#x cSubLeaves=%#x\n", uLeaf, cSubLeaves));
1064 LogRel(("------------------ dump of problematic subleaves ------------------\n"));
1065 for (uint32_t uSubLeaf = 0; uSubLeaf < 128; uSubLeaf++)
1066 {
1067 uint32_t auTmp[4];
1068 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &auTmp[0], &auTmp[1], &auTmp[2], &auTmp[3]);
1069 LogRel(("CPUM: %#010x, %#010x => %#010x %#010x %#010x %#010x\n",
1070 uLeaf, uSubLeaf, auTmp[0], auTmp[1], auTmp[2], auTmp[3]));
1071 }
1072 LogRel(("----------------- dump of what we've found so far -----------------\n"));
1073 for (uint32_t i = 0 ; i < *pcLeaves; i++)
1074 LogRel(("CPUM: %#010x, %#010x/%#010x => %#010x %#010x %#010x %#010x\n",
1075 (*ppaLeaves)[i].uLeaf, (*ppaLeaves)[i].uSubLeaf, (*ppaLeaves)[i].fSubLeafMask,
1076 (*ppaLeaves)[i].uEax, (*ppaLeaves)[i].uEbx, (*ppaLeaves)[i].uEcx, (*ppaLeaves)[i].uEdx));
1077 LogRel(("\nPlease create a defect on virtualbox.org and attach this log file!\n\n"));
1078 return VERR_CPUM_TOO_MANY_CPUID_SUBLEAVES;
1079 }
1080
1081 for (uint32_t uSubLeaf = 0; uSubLeaf < cSubLeaves; uSubLeaf++)
1082 {
1083 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &uEax, &uEbx, &uEcx, &uEdx);
1084 int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
1085 uLeaf, uSubLeaf, UINT32_MAX, uEax, uEbx, uEcx, uEdx,
1086 uSubLeaf + 1 == cSubLeaves && fFinalEcxUnchanged
1087 ? CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED : 0);
1088 if (RT_FAILURE(rc))
1089 return rc;
1090 }
1091 }
1092 else
1093 {
1094 ASMCpuIdExSlow(uLeaf, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
1095 int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
1096 uLeaf, 0, 0, uEax, uEbx, uEcx, uEdx, 0);
1097 if (RT_FAILURE(rc))
1098 return rc;
1099 }
1100
1101 /* next */
1102 uLeaf++;
1103 }
1104 }
1105 /*
1106 * Special CPUIDs needs special handling as they don't follow the
1107 * leaf count principle used above.
1108 */
1109 else if (s_aCandidates[iOuter].fSpecial)
1110 {
1111 bool fKeep = false;
1112 if (uLeaf == 0x8ffffffe && uEax == UINT32_C(0x00494544))
1113 fKeep = true;
1114 else if ( uLeaf == 0x8fffffff
1115 && RT_C_IS_PRINT(RT_BYTE1(uEax))
1116 && RT_C_IS_PRINT(RT_BYTE2(uEax))
1117 && RT_C_IS_PRINT(RT_BYTE3(uEax))
1118 && RT_C_IS_PRINT(RT_BYTE4(uEax))
1119 && RT_C_IS_PRINT(RT_BYTE1(uEbx))
1120 && RT_C_IS_PRINT(RT_BYTE2(uEbx))
1121 && RT_C_IS_PRINT(RT_BYTE3(uEbx))
1122 && RT_C_IS_PRINT(RT_BYTE4(uEbx))
1123 && RT_C_IS_PRINT(RT_BYTE1(uEcx))
1124 && RT_C_IS_PRINT(RT_BYTE2(uEcx))
1125 && RT_C_IS_PRINT(RT_BYTE3(uEcx))
1126 && RT_C_IS_PRINT(RT_BYTE4(uEcx))
1127 && RT_C_IS_PRINT(RT_BYTE1(uEdx))
1128 && RT_C_IS_PRINT(RT_BYTE2(uEdx))
1129 && RT_C_IS_PRINT(RT_BYTE3(uEdx))
1130 && RT_C_IS_PRINT(RT_BYTE4(uEdx)) )
1131 fKeep = true;
1132 if (fKeep)
1133 {
1134 int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
1135 uLeaf, 0, 0, uEax, uEbx, uEcx, uEdx, 0);
1136 if (RT_FAILURE(rc))
1137 return rc;
1138 }
1139 }
1140 }
1141
1142 return VINF_SUCCESS;
1143}
1144
1145
1146/**
1147 * Determines the method the CPU uses to handle unknown CPUID leaves.
1148 *
1149 * @returns VBox status code.
1150 * @param penmUnknownMethod Where to return the method.
1151 * @param pDefUnknown Where to return default unknown values. This
1152 * will be set, even if the resulting method
1153 * doesn't actually needs it.
1154 */
1155VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown)
1156{
1157 uint32_t uLastStd = ASMCpuId_EAX(0);
1158 uint32_t uLastExt = ASMCpuId_EAX(0x80000000);
1159 if (!ASMIsValidExtRange(uLastExt))
1160 uLastExt = 0x80000000;
1161
1162 uint32_t auChecks[] =
1163 {
1164 uLastStd + 1,
1165 uLastStd + 5,
1166 uLastStd + 8,
1167 uLastStd + 32,
1168 uLastStd + 251,
1169 uLastExt + 1,
1170 uLastExt + 8,
1171 uLastExt + 15,
1172 uLastExt + 63,
1173 uLastExt + 255,
1174 0x7fbbffcc,
1175 0x833f7872,
1176 0xefff2353,
1177 0x35779456,
1178 0x1ef6d33e,
1179 };
1180
1181 static const uint32_t s_auValues[] =
1182 {
1183 0xa95d2156,
1184 0x00000001,
1185 0x00000002,
1186 0x00000008,
1187 0x00000000,
1188 0x55773399,
1189 0x93401769,
1190 0x12039587,
1191 };
1192
1193 /*
1194 * Simple method, all zeros.
1195 */
1196 *penmUnknownMethod = CPUMUKNOWNCPUID_DEFAULTS;
1197 pDefUnknown->eax = 0;
1198 pDefUnknown->ebx = 0;
1199 pDefUnknown->ecx = 0;
1200 pDefUnknown->edx = 0;
1201
1202 /*
1203 * Intel has been observed returning the last standard leaf.
1204 */
1205 uint32_t auLast[4];
1206 ASMCpuIdExSlow(uLastStd, 0, 0, 0, &auLast[0], &auLast[1], &auLast[2], &auLast[3]);
1207
1208 uint32_t cChecks = RT_ELEMENTS(auChecks);
1209 while (cChecks > 0)
1210 {
1211 uint32_t auCur[4];
1212 ASMCpuIdExSlow(auChecks[cChecks - 1], 0, 0, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
1213 if (memcmp(auCur, auLast, sizeof(auCur)))
1214 break;
1215 cChecks--;
1216 }
1217 if (cChecks == 0)
1218 {
1219 /* Now, what happens when the input changes? Esp. ECX. */
1220 uint32_t cTotal = 0;
1221 uint32_t cSame = 0;
1222 uint32_t cLastWithEcx = 0;
1223 uint32_t cNeither = 0;
1224 uint32_t cValues = RT_ELEMENTS(s_auValues);
1225 while (cValues > 0)
1226 {
1227 uint32_t uValue = s_auValues[cValues - 1];
1228 uint32_t auLastWithEcx[4];
1229 ASMCpuIdExSlow(uLastStd, uValue, uValue, uValue,
1230 &auLastWithEcx[0], &auLastWithEcx[1], &auLastWithEcx[2], &auLastWithEcx[3]);
1231
1232 cChecks = RT_ELEMENTS(auChecks);
1233 while (cChecks > 0)
1234 {
1235 uint32_t auCur[4];
1236 ASMCpuIdExSlow(auChecks[cChecks - 1], uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
1237 if (!memcmp(auCur, auLast, sizeof(auCur)))
1238 {
1239 cSame++;
1240 if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
1241 cLastWithEcx++;
1242 }
1243 else if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
1244 cLastWithEcx++;
1245 else
1246 cNeither++;
1247 cTotal++;
1248 cChecks--;
1249 }
1250 cValues--;
1251 }
1252
1253 Log(("CPUM: cNeither=%d cSame=%d cLastWithEcx=%d cTotal=%d\n", cNeither, cSame, cLastWithEcx, cTotal));
1254 if (cSame == cTotal)
1255 *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF;
1256 else if (cLastWithEcx == cTotal)
1257 *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX;
1258 else
1259 *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF;
1260 pDefUnknown->eax = auLast[0];
1261 pDefUnknown->ebx = auLast[1];
1262 pDefUnknown->ecx = auLast[2];
1263 pDefUnknown->edx = auLast[3];
1264 return VINF_SUCCESS;
1265 }
1266
1267 /*
1268 * Unchanged register values?
1269 */
1270 cChecks = RT_ELEMENTS(auChecks);
1271 while (cChecks > 0)
1272 {
1273 uint32_t const uLeaf = auChecks[cChecks - 1];
1274 uint32_t cValues = RT_ELEMENTS(s_auValues);
1275 while (cValues > 0)
1276 {
1277 uint32_t uValue = s_auValues[cValues - 1];
1278 uint32_t auCur[4];
1279 ASMCpuIdExSlow(uLeaf, uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
1280 if ( auCur[0] != uLeaf
1281 || auCur[1] != uValue
1282 || auCur[2] != uValue
1283 || auCur[3] != uValue)
1284 break;
1285 cValues--;
1286 }
1287 if (cValues != 0)
1288 break;
1289 cChecks--;
1290 }
1291 if (cChecks == 0)
1292 {
1293 *penmUnknownMethod = CPUMUKNOWNCPUID_PASSTHRU;
1294 return VINF_SUCCESS;
1295 }
1296
1297 /*
1298 * Just go with the simple method.
1299 */
1300 return VINF_SUCCESS;
1301}
1302
1303
1304/**
1305 * Translates a unknow CPUID leaf method into the constant name (sans prefix).
1306 *
1307 * @returns Read only name string.
1308 * @param enmUnknownMethod The method to translate.
1309 */
1310VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUKNOWNCPUID enmUnknownMethod)
1311{
1312 switch (enmUnknownMethod)
1313 {
1314 case CPUMUKNOWNCPUID_DEFAULTS: return "DEFAULTS";
1315 case CPUMUKNOWNCPUID_LAST_STD_LEAF: return "LAST_STD_LEAF";
1316 case CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: return "LAST_STD_LEAF_WITH_ECX";
1317 case CPUMUKNOWNCPUID_PASSTHRU: return "PASSTHRU";
1318
1319 case CPUMUKNOWNCPUID_INVALID:
1320 case CPUMUKNOWNCPUID_END:
1321 case CPUMUKNOWNCPUID_32BIT_HACK:
1322 break;
1323 }
1324 return "Invalid-unknown-CPUID-method";
1325}
1326
1327
1328/**
1329 * Detect the CPU vendor give n the
1330 *
1331 * @returns The vendor.
1332 * @param uEAX EAX from CPUID(0).
1333 * @param uEBX EBX from CPUID(0).
1334 * @param uECX ECX from CPUID(0).
1335 * @param uEDX EDX from CPUID(0).
1336 */
1337VMMR3DECL(CPUMCPUVENDOR) CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
1338{
1339 if (ASMIsValidStdRange(uEAX))
1340 {
1341 if (ASMIsAmdCpuEx(uEBX, uECX, uEDX))
1342 return CPUMCPUVENDOR_AMD;
1343
1344 if (ASMIsIntelCpuEx(uEBX, uECX, uEDX))
1345 return CPUMCPUVENDOR_INTEL;
1346
1347 if (ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX))
1348 return CPUMCPUVENDOR_VIA;
1349
1350 if ( uEBX == UINT32_C(0x69727943) /* CyrixInstead */
1351 && uECX == UINT32_C(0x64616574)
1352 && uEDX == UINT32_C(0x736E4978))
1353 return CPUMCPUVENDOR_CYRIX;
1354
1355 /* "Geode by NSC", example: family 5, model 9. */
1356
1357 /** @todo detect the other buggers... */
1358 }
1359
1360 return CPUMCPUVENDOR_UNKNOWN;
1361}
1362
1363
1364/**
1365 * Translates a CPU vendor enum value into the corresponding string constant.
1366 *
1367 * The named can be prefixed with 'CPUMCPUVENDOR_' to construct a valid enum
1368 * value name. This can be useful when generating code.
1369 *
1370 * @returns Read only name string.
1371 * @param enmVendor The CPU vendor value.
1372 */
1373VMMR3DECL(const char *) CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor)
1374{
1375 switch (enmVendor)
1376 {
1377 case CPUMCPUVENDOR_INTEL: return "INTEL";
1378 case CPUMCPUVENDOR_AMD: return "AMD";
1379 case CPUMCPUVENDOR_VIA: return "VIA";
1380 case CPUMCPUVENDOR_CYRIX: return "CYRIX";
1381 case CPUMCPUVENDOR_UNKNOWN: return "UNKNOWN";
1382
1383 case CPUMCPUVENDOR_INVALID:
1384 case CPUMCPUVENDOR_32BIT_HACK:
1385 break;
1386 }
1387 return "Invalid-cpu-vendor";
1388}
1389
1390
1391static PCCPUMCPUIDLEAF cpumR3CpuIdFindLeaf(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf)
1392{
1393 /* Could do binary search, doing linear now because I'm lazy. */
1394 PCCPUMCPUIDLEAF pLeaf = paLeaves;
1395 while (cLeaves-- > 0)
1396 {
1397 if (pLeaf->uLeaf == uLeaf)
1398 return pLeaf;
1399 pLeaf++;
1400 }
1401 return NULL;
1402}
1403
1404
1405int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures)
1406{
1407 RT_ZERO(*pFeatures);
1408 if (cLeaves >= 2)
1409 {
1410 AssertLogRelReturn(paLeaves[0].uLeaf == 0, VERR_CPUM_IPE_1);
1411 AssertLogRelReturn(paLeaves[1].uLeaf == 1, VERR_CPUM_IPE_1);
1412
1413 pFeatures->enmCpuVendor = CPUMR3CpuIdDetectVendorEx(paLeaves[0].uEax,
1414 paLeaves[0].uEbx,
1415 paLeaves[0].uEcx,
1416 paLeaves[0].uEdx);
1417 pFeatures->uFamily = ASMGetCpuFamily(paLeaves[1].uEax);
1418 pFeatures->uModel = ASMGetCpuModel(paLeaves[1].uEax, pFeatures->enmCpuVendor == CPUMCPUVENDOR_INTEL);
1419 pFeatures->uStepping = ASMGetCpuStepping(paLeaves[1].uEax);
1420 pFeatures->enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx((CPUMCPUVENDOR)pFeatures->enmCpuVendor,
1421 pFeatures->uFamily,
1422 pFeatures->uModel,
1423 pFeatures->uStepping);
1424
1425 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000008);
1426 if (pLeaf)
1427 pFeatures->cMaxPhysAddrWidth = pLeaf->uEax & 0xff;
1428 else if (paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PSE36)
1429 pFeatures->cMaxPhysAddrWidth = 36;
1430 else
1431 pFeatures->cMaxPhysAddrWidth = 32;
1432
1433 /* Standard features. */
1434 pFeatures->fMsr = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_MSR);
1435 pFeatures->fApic = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_APIC);
1436 pFeatures->fX2Apic = RT_BOOL(paLeaves[1].uEcx & X86_CPUID_FEATURE_ECX_X2APIC);
1437 pFeatures->fPse = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PSE);
1438 pFeatures->fPse36 = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PSE36);
1439 pFeatures->fPae = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PAE);
1440 pFeatures->fPat = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PAT);
1441 pFeatures->fFxSaveRstor = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_FXSR);
1442 pFeatures->fSysEnter = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_SEP);
1443 pFeatures->fHypervisorPresent = RT_BOOL(paLeaves[1].uEcx & X86_CPUID_FEATURE_ECX_HVP);
1444 pFeatures->fMonitorMWait = RT_BOOL(paLeaves[1].uEcx & X86_CPUID_FEATURE_ECX_MONITOR);
1445
1446 /* MWAIT/MONITOR leaf. */
1447 PCCPUMCPUIDLEAF const pMWaitLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 5);
1448 if (pMWaitLeaf)
1449 {
1450 pFeatures->fMWaitExtensions = (pMWaitLeaf->uEcx & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1451 == (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
1452 }
1453
1454 /* Extended features. */
1455 PCCPUMCPUIDLEAF const pExtLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000001);
1456 if (pExtLeaf)
1457 {
1458 pFeatures->fLongMode = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
1459 pFeatures->fSysCall = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL);
1460 pFeatures->fNoExecute = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_NX);
1461 pFeatures->fLahfSahf = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
1462 pFeatures->fRdTscP = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
1463 }
1464
1465 if ( pExtLeaf
1466 && pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD)
1467 {
1468 /* AMD features. */
1469 pFeatures->fMsr |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MSR);
1470 pFeatures->fApic |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_APIC);
1471 pFeatures->fPse |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PSE);
1472 pFeatures->fPse36 |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PSE36);
1473 pFeatures->fPae |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PAE);
1474 pFeatures->fPat |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PAT);
1475 pFeatures->fFxSaveRstor |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_FXSR);
1476 }
1477
1478 /*
1479 * Quirks.
1480 */
1481 pFeatures->fLeakyFxSR = pExtLeaf
1482 && (pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
1483 && pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD
1484 && pFeatures->uFamily >= 6 /* K7 and up */;
1485 }
1486 else
1487 AssertLogRelReturn(cLeaves == 0, VERR_CPUM_IPE_1);
1488 return VINF_SUCCESS;
1489}
1490
1491
1492/*
1493 *
1494 * Init related code.
1495 * Init related code.
1496 * Init related code.
1497 *
1498 *
1499 */
1500#ifdef VBOX_IN_VMM
1501
1502/**
1503 * Loads MSR range overrides.
1504 *
1505 * This must be called before the MSR ranges are moved from the normal heap to
1506 * the hyper heap!
1507 *
1508 * @returns VBox status code (VMSetError called).
1509 * @param pVM Pointer to the cross context VM structure
1510 * @param pMsrNode The CFGM node with the MSR overrides.
1511 */
1512static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode)
1513{
1514 for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode))
1515 {
1516 /*
1517 * Assemble a valid MSR range.
1518 */
1519 CPUMMSRRANGE MsrRange;
1520 MsrRange.offCpumCpu = 0;
1521 MsrRange.fReserved = 0;
1522
1523 int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName));
1524 if (RT_FAILURE(rc))
1525 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc);
1526
1527 rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst);
1528 if (RT_FAILURE(rc))
1529 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n",
1530 MsrRange.szName, rc);
1531
1532 rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst);
1533 if (RT_FAILURE(rc))
1534 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n",
1535 MsrRange.szName, rc);
1536
1537 char szType[32];
1538 rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue");
1539 if (RT_FAILURE(rc))
1540 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n",
1541 MsrRange.szName, rc);
1542 if (!RTStrICmp(szType, "FixedValue"))
1543 {
1544 MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue;
1545 MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite;
1546
1547 rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uValue, 0);
1548 if (RT_FAILURE(rc))
1549 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n",
1550 MsrRange.szName, rc);
1551
1552 rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0);
1553 if (RT_FAILURE(rc))
1554 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n",
1555 MsrRange.szName, rc);
1556
1557 rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0);
1558 if (RT_FAILURE(rc))
1559 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n",
1560 MsrRange.szName, rc);
1561 }
1562 else
1563 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
1564 "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType);
1565
1566 /*
1567 * Insert the range into the table (replaces/splits/shrinks existing
1568 * MSR ranges).
1569 */
1570 rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
1571 &MsrRange);
1572 if (RT_FAILURE(rc))
1573 return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc);
1574 }
1575
1576 return VINF_SUCCESS;
1577}
1578
1579
1580/**
1581 * Loads CPUID leaf overrides.
1582 *
1583 * This must be called before the CPUID leaves are moved from the normal
1584 * heap to the hyper heap!
1585 *
1586 * @returns VBox status code (VMSetError called).
1587 * @param pVM Pointer to the cross context VM structure
1588 * @param pParentNode The CFGM node with the CPUID leaves.
1589 * @param pszLabel How to label the overrides we're loading.
1590 */
1591static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel)
1592{
1593 for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode))
1594 {
1595 /*
1596 * Get the leaf and subleaf numbers.
1597 */
1598 char szName[128];
1599 int rc = CFGMR3GetName(pNode, szName, sizeof(szName));
1600 if (RT_FAILURE(rc))
1601 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc);
1602
1603 /* The leaf number is either specified directly or thru the node name. */
1604 uint32_t uLeaf;
1605 rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf);
1606 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1607 {
1608 rc = RTStrToUInt32Full(szName, 16, &uLeaf);
1609 if (rc != VINF_SUCCESS)
1610 return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS,
1611 "Invalid %s entry: Invalid leaf number: '%s' \n", pszLabel, szName);
1612 }
1613 else if (RT_FAILURE(rc))
1614 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n",
1615 pszLabel, szName, rc);
1616
1617 uint32_t uSubLeaf;
1618 rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0);
1619 if (RT_FAILURE(rc))
1620 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n",
1621 pszLabel, szName, rc);
1622
1623 uint32_t fSubLeafMask;
1624 rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0);
1625 if (RT_FAILURE(rc))
1626 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n",
1627 pszLabel, szName, rc);
1628
1629 /*
1630 * Look up the specified leaf, since the output register values
1631 * defaults to any existing values. This allows overriding a single
1632 * register, without needing to know the other values.
1633 */
1634 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
1635 uLeaf, uSubLeaf);
1636 CPUMCPUIDLEAF Leaf;
1637 if (pLeaf)
1638 Leaf = *pLeaf;
1639 else
1640 RT_ZERO(Leaf);
1641 Leaf.uLeaf = uLeaf;
1642 Leaf.uSubLeaf = uSubLeaf;
1643 Leaf.fSubLeafMask = fSubLeafMask;
1644
1645 rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax);
1646 if (RT_FAILURE(rc))
1647 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n",
1648 pszLabel, szName, rc);
1649 rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx);
1650 if (RT_FAILURE(rc))
1651 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n",
1652 pszLabel, szName, rc);
1653 rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx);
1654 if (RT_FAILURE(rc))
1655 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n",
1656 pszLabel, szName, rc);
1657 rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx);
1658 if (RT_FAILURE(rc))
1659 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n",
1660 pszLabel, szName, rc);
1661
1662 /*
1663 * Insert the leaf into the table (replaces existing ones).
1664 */
1665 rc = cpumR3CpuIdInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves,
1666 &Leaf);
1667 if (RT_FAILURE(rc))
1668 return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc);
1669 }
1670
1671 return VINF_SUCCESS;
1672}
1673
1674
1675
1676/**
1677 * Fetches overrides for a CPUID leaf.
1678 *
1679 * @returns VBox status code.
1680 * @param pLeaf The leaf to load the overrides into.
1681 * @param pCfgNode The CFGM node containing the overrides
1682 * (/CPUM/HostCPUID/ or /CPUM/CPUID/).
1683 * @param iLeaf The CPUID leaf number.
1684 */
1685static int cpumR3CpuIdFetchLeafOverride(PCPUMCPUID pLeaf, PCFGMNODE pCfgNode, uint32_t iLeaf)
1686{
1687 PCFGMNODE pLeafNode = CFGMR3GetChildF(pCfgNode, "%RX32", iLeaf);
1688 if (pLeafNode)
1689 {
1690 uint32_t u32;
1691 int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32);
1692 if (RT_SUCCESS(rc))
1693 pLeaf->eax = u32;
1694 else
1695 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
1696
1697 rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32);
1698 if (RT_SUCCESS(rc))
1699 pLeaf->ebx = u32;
1700 else
1701 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
1702
1703 rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32);
1704 if (RT_SUCCESS(rc))
1705 pLeaf->ecx = u32;
1706 else
1707 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
1708
1709 rc = CFGMR3QueryU32(pLeafNode, "edx", &u32);
1710 if (RT_SUCCESS(rc))
1711 pLeaf->edx = u32;
1712 else
1713 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
1714
1715 }
1716 return VINF_SUCCESS;
1717}
1718
1719
1720/**
1721 * Load the overrides for a set of CPUID leaves.
1722 *
1723 * @returns VBox status code.
1724 * @param paLeaves The leaf array.
1725 * @param cLeaves The number of leaves.
1726 * @param uStart The start leaf number.
1727 * @param pCfgNode The CFGM node containing the overrides
1728 * (/CPUM/HostCPUID/ or /CPUM/CPUID/).
1729 */
1730static int cpumR3CpuIdInitLoadOverrideSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
1731{
1732 for (uint32_t i = 0; i < cLeaves; i++)
1733 {
1734 int rc = cpumR3CpuIdFetchLeafOverride(&paLeaves[i], pCfgNode, uStart + i);
1735 if (RT_FAILURE(rc))
1736 return rc;
1737 }
1738
1739 return VINF_SUCCESS;
1740}
1741
1742/**
1743 * Init a set of host CPUID leaves.
1744 *
1745 * @returns VBox status code.
1746 * @param paLeaves The leaf array.
1747 * @param cLeaves The number of leaves.
1748 * @param uStart The start leaf number.
1749 * @param pCfgNode The /CPUM/HostCPUID/ node.
1750 */
1751static int cpumR3CpuIdInitHostSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
1752{
1753 /* Using the ECX variant for all of them can't hurt... */
1754 for (uint32_t i = 0; i < cLeaves; i++)
1755 ASMCpuIdExSlow(uStart + i, 0, 0, 0, &paLeaves[i].eax, &paLeaves[i].ebx, &paLeaves[i].ecx, &paLeaves[i].edx);
1756
1757 /* Load CPUID leaf override; we currently don't care if the user
1758 specifies features the host CPU doesn't support. */
1759 return cpumR3CpuIdInitLoadOverrideSet(uStart, paLeaves, cLeaves, pCfgNode);
1760}
1761
1762
1763static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCPUM, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves)
1764{
1765 /*
1766 * Install the CPUID information.
1767 */
1768 int rc = MMHyperDupMem(pVM, paLeaves, sizeof(paLeaves[0]) * cLeaves, 32,
1769 MM_TAG_CPUM_CPUID, (void **)&pCPUM->GuestInfo.paCpuIdLeavesR3);
1770
1771 AssertLogRelRCReturn(rc, rc);
1772
1773
1774 pCPUM->GuestInfo.paCpuIdLeavesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
1775 pCPUM->GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
1776 Assert(MMHyperR0ToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesR0) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
1777 Assert(MMHyperRCToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesRC) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
1778
1779 /*
1780 * Explode the guest CPU features.
1781 */
1782 rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
1783 AssertLogRelRCReturn(rc, rc);
1784
1785 /*
1786 * Adjust the scalable bus frequency according to the CPUID information
1787 * we're now using.
1788 */
1789 if (CPUMMICROARCH_IS_INTEL_CORE7(pVM->cpum.s.GuestFeatures.enmMicroarch))
1790 pCPUM->GuestInfo.uScalableBusFreq = pCPUM->GuestFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
1791 ? UINT64_C(100000000) /* 100MHz */
1792 : UINT64_C(133333333); /* 133MHz */
1793
1794 /*
1795 * Populate the legacy arrays. Currently used for everything, later only
1796 * for patch manager.
1797 */
1798 struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] =
1799 {
1800 { pCPUM->aGuestCpuIdStd, RT_ELEMENTS(pCPUM->aGuestCpuIdStd), 0x00000000 },
1801 { pCPUM->aGuestCpuIdExt, RT_ELEMENTS(pCPUM->aGuestCpuIdExt), 0x80000000 },
1802 { pCPUM->aGuestCpuIdCentaur, RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), 0xc0000000 },
1803 };
1804 for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++)
1805 {
1806 uint32_t cLeft = aOldRanges[i].cCpuIds;
1807 uint32_t uLeaf = aOldRanges[i].uBase + cLeft;
1808 PCPUMCPUID pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft];
1809 while (cLeft-- > 0)
1810 {
1811 uLeaf--;
1812 pLegacyLeaf--;
1813
1814 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, uLeaf,
1815 0 /* uSubLeaf */);
1816 if (pLeaf)
1817 {
1818 pLegacyLeaf->eax = pLeaf->uEax;
1819 pLegacyLeaf->ebx = pLeaf->uEbx;
1820 pLegacyLeaf->ecx = pLeaf->uEcx;
1821 pLegacyLeaf->edx = pLeaf->uEdx;
1822 }
1823 else
1824 *pLegacyLeaf = pCPUM->GuestInfo.DefCpuId;
1825 }
1826 }
1827
1828 pCPUM->GuestCpuIdDef = pCPUM->GuestInfo.DefCpuId;
1829
1830 return VINF_SUCCESS;
1831}
1832
1833
1834/**
1835 * Initializes the emulated CPU's cpuid information.
1836 *
1837 * @returns VBox status code.
1838 * @param pVM Pointer to the VM.
1839 */
1840int cpumR3CpuIdInit(PVM pVM)
1841{
1842 PCPUM pCPUM = &pVM->cpum.s;
1843 PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
1844 int rc;
1845
1846#define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \
1847 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \
1848 { \
1849 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \
1850 (a_pLeafReg) &= ~(uint32_t)(fMask); \
1851 }
1852#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \
1853 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \
1854 { \
1855 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
1856 (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
1857 }
1858
1859 /*
1860 * Read the configuration.
1861 */
1862 /** @cfgm{/CPUM/SyntheticCpu, boolean, false}
1863 * Enables the Synthetic CPU. The Vendor ID and Processor Name are
1864 * completely overridden by VirtualBox custom strings. Some
1865 * CPUID information is withheld, like the cache info.
1866 *
1867 * This is obsoleted by PortableCpuIdLevel. */
1868 bool fSyntheticCpu;
1869 rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &fSyntheticCpu, false);
1870 AssertRCReturn(rc, rc);
1871
1872 /** @cfgm{/CPUM/PortableCpuIdLevel, 8-bit, 0, 3, 0}
1873 * When non-zero CPUID features that could cause portability issues will be
1874 * stripped. The higher the value the more features gets stripped. Higher
1875 * values should only be used when older CPUs are involved since it may
1876 * harm performance and maybe also cause problems with specific guests. */
1877 rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, fSyntheticCpu ? 1 : 0);
1878 AssertLogRelRCReturn(rc, rc);
1879
1880 /** @cfgm{/CPUM/GuestCpuName, string}
1881 * The name of the CPU we're to emulate. The default is the host CPU.
1882 * Note! CPUs other than "host" one is currently unsupported. */
1883 char szCpuName[128];
1884 rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", szCpuName, sizeof(szCpuName), "host");
1885 AssertLogRelRCReturn(rc, rc);
1886
1887 /** @cfgm{/CPUM/CMPXCHG16B, boolean, false}
1888 * Expose CMPXCHG16B to the guest if supported by the host.
1889 */
1890 bool fCmpXchg16b;
1891 rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false);
1892 AssertLogRelRCReturn(rc, rc);
1893
1894 /** @cfgm{/CPUM/MONITOR, boolean, true}
1895 * Expose MONITOR/MWAIT instructions to the guest.
1896 */
1897 bool fMonitor;
1898 rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true);
1899 AssertLogRelRCReturn(rc, rc);
1900
1901 /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
1902 * Expose MWAIT extended features to the guest. For now we expose just MWAIT
1903 * break on interrupt feature (bit 1).
1904 */
1905 bool fMWaitExtensions;
1906 rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false);
1907 AssertLogRelRCReturn(rc, rc);
1908
1909 /** @cfgm{/CPUM/SSE4.1, boolean, true}
1910 * Expose SSE4.1 to the guest if available.
1911 */
1912 bool fSse41;
1913 rc = CFGMR3QueryBoolDef(pCpumCfg, "SSE4.1", &fSse41, true);
1914 AssertLogRelRCReturn(rc, rc);
1915
1916 /** @cfgm{/CPUM/SSE4.2, boolean, true}
1917 * Expose SSE4.2 to the guest if available.
1918 */
1919 bool fSse42;
1920 rc = CFGMR3QueryBoolDef(pCpumCfg, "SSE4.2", &fSse42, true);
1921 AssertLogRelRCReturn(rc, rc);
1922
1923 /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
1924 * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
1925 * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
1926 * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
1927 */
1928 bool fNt4LeafLimit;
1929 rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false);
1930 AssertLogRelRCReturn(rc, rc);
1931
1932 /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
1933 * Restrict the reported CPU family+model+stepping of intel CPUs. This is
1934 * probably going to be a temporary hack, so don't depend on this.
1935 * The 1st byte of the value is the stepping, the 2nd byte value is the model
1936 * number and the 3rd byte value is the family, and the 4th value must be zero.
1937 */
1938 uint32_t uMaxIntelFamilyModelStep;
1939 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX);
1940 AssertLogRelRCReturn(rc, rc);
1941
1942 /*
1943 * Get the guest CPU data from the database and/or the host.
1944 */
1945 rc = cpumR3DbGetCpuInfo(szCpuName, &pCPUM->GuestInfo);
1946 if (RT_FAILURE(rc))
1947 return rc == VERR_CPUM_DB_CPU_NOT_FOUND
1948 ? VMSetError(pVM, rc, RT_SRC_POS,
1949 "Info on guest CPU '%s' could not be found. Please, select a different CPU.", szCpuName)
1950 : rc;
1951
1952 /** @cfgm{/CPUM/MSRs/[Name]/[First|Last|Type|Value|...],}
1953 * Overrides the guest MSRs.
1954 */
1955 rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs"));
1956
1957 /** @cfgm{/CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
1958 * Overrides the CPUID leaf values (from the host CPU usually) used for
1959 * calculating the guest CPUID leaves. This can be used to preserve the CPUID
1960 * values when moving a VM to a different machine. Another use is restricting
1961 * (or extending) the feature set exposed to the guest. */
1962 if (RT_SUCCESS(rc))
1963 rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID");
1964
1965 if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */
1966 rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
1967 "Found unsupported configuration node '/CPUM/CPUID/'. "
1968 "Please use IMachine::setCPUIDLeaf() instead.");
1969
1970 /*
1971 * Pre-explode the CPUID info.
1972 */
1973 if (RT_SUCCESS(rc))
1974 rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
1975 if (RT_FAILURE(rc))
1976 {
1977 RTMemFree(pCPUM->GuestInfo.paCpuIdLeavesR3);
1978 pCPUM->GuestInfo.paCpuIdLeavesR3 = NULL;
1979 RTMemFree(pCPUM->GuestInfo.paMsrRangesR3);
1980 pCPUM->GuestInfo.paMsrRangesR3 = NULL;
1981 return rc;
1982 }
1983
1984
1985 /* ... split this function about here ... */
1986
1987
1988 /* Cpuid 1:
1989 * Only report features we can support.
1990 *
1991 * Note! When enabling new features the Synthetic CPU and Portable CPUID
1992 * options may require adjusting (i.e. stripping what was enabled).
1993 */
1994 PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
1995 1, 0); /* Note! Must refetch when used later. */
1996 AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2);
1997 pStdFeatureLeaf->uEdx &= X86_CPUID_FEATURE_EDX_FPU
1998 | X86_CPUID_FEATURE_EDX_VME
1999 | X86_CPUID_FEATURE_EDX_DE
2000 | X86_CPUID_FEATURE_EDX_PSE
2001 | X86_CPUID_FEATURE_EDX_TSC
2002 | X86_CPUID_FEATURE_EDX_MSR
2003 //| X86_CPUID_FEATURE_EDX_PAE - set later if configured.
2004 | X86_CPUID_FEATURE_EDX_MCE
2005 | X86_CPUID_FEATURE_EDX_CX8
2006 //| X86_CPUID_FEATURE_EDX_APIC - set by the APIC device if present.
2007 /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see @bugref{1757}) */
2008 //| X86_CPUID_FEATURE_EDX_SEP
2009 | X86_CPUID_FEATURE_EDX_MTRR
2010 | X86_CPUID_FEATURE_EDX_PGE
2011 | X86_CPUID_FEATURE_EDX_MCA
2012 | X86_CPUID_FEATURE_EDX_CMOV
2013 | X86_CPUID_FEATURE_EDX_PAT
2014 | X86_CPUID_FEATURE_EDX_PSE36
2015 //| X86_CPUID_FEATURE_EDX_PSN - no serial number.
2016 | X86_CPUID_FEATURE_EDX_CLFSH
2017 //| X86_CPUID_FEATURE_EDX_DS - no debug store.
2018 //| X86_CPUID_FEATURE_EDX_ACPI - not virtualized yet.
2019 | X86_CPUID_FEATURE_EDX_MMX
2020 | X86_CPUID_FEATURE_EDX_FXSR
2021 | X86_CPUID_FEATURE_EDX_SSE
2022 | X86_CPUID_FEATURE_EDX_SSE2
2023 //| X86_CPUID_FEATURE_EDX_SS - no self snoop.
2024 //| X86_CPUID_FEATURE_EDX_HTT - no hyperthreading.
2025 //| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
2026 //| X86_CPUID_FEATURE_EDX_PBE - no pending break enabled.
2027 | 0;
2028 pStdFeatureLeaf->uEcx &= 0
2029 | X86_CPUID_FEATURE_ECX_SSE3
2030 /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
2031 | ((fMonitor && pVM->cCpus == 1) ? X86_CPUID_FEATURE_ECX_MONITOR : 0)
2032 //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
2033 //| X86_CPUID_FEATURE_ECX_VMX - not virtualized.
2034 //| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
2035 //| X86_CPUID_FEATURE_ECX_TM2 - no thermal monitor 2.
2036 | X86_CPUID_FEATURE_ECX_SSSE3
2037 //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
2038 | (fCmpXchg16b ? X86_CPUID_FEATURE_ECX_CX16 : 0)
2039 /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
2040 //| X86_CPUID_FEATURE_ECX_TPRUPDATE
2041 | (fSse41 ? X86_CPUID_FEATURE_ECX_SSE4_1 : 0)
2042 | (fSse42 ? X86_CPUID_FEATURE_ECX_SSE4_2 : 0)
2043 /* ECX Bit 21 - x2APIC support - not yet. */
2044 // | X86_CPUID_FEATURE_ECX_X2APIC
2045 /* ECX Bit 23 - POPCNT instruction. */
2046 //| X86_CPUID_FEATURE_ECX_POPCNT
2047 | 0;
2048 if (pCPUM->u8PortableCpuIdLevel > 0)
2049 {
2050 PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
2051 PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
2052 PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE3, X86_CPUID_FEATURE_ECX_SSE3);
2053 PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE4_1, X86_CPUID_FEATURE_ECX_SSE4_1);
2054 PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE4_2, X86_CPUID_FEATURE_ECX_SSE4_2);
2055 PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, CX16, X86_CPUID_FEATURE_ECX_CX16);
2056 PORTABLE_DISABLE_FEATURE_BIT(2, pStdFeatureLeaf->uEdx, SSE2, X86_CPUID_FEATURE_EDX_SSE2);
2057 PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, SSE, X86_CPUID_FEATURE_EDX_SSE);
2058 PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
2059 PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CMOV, X86_CPUID_FEATURE_EDX_CMOV);
2060
2061 Assert(!(pStdFeatureLeaf->uEdx & ( X86_CPUID_FEATURE_EDX_SEP
2062 | X86_CPUID_FEATURE_EDX_PSN
2063 | X86_CPUID_FEATURE_EDX_DS
2064 | X86_CPUID_FEATURE_EDX_ACPI
2065 | X86_CPUID_FEATURE_EDX_SS
2066 | X86_CPUID_FEATURE_EDX_TM
2067 | X86_CPUID_FEATURE_EDX_PBE
2068 )));
2069 Assert(!(pStdFeatureLeaf->uEcx & ( X86_CPUID_FEATURE_ECX_PCLMUL
2070 | X86_CPUID_FEATURE_ECX_DTES64
2071 | X86_CPUID_FEATURE_ECX_CPLDS
2072 | X86_CPUID_FEATURE_ECX_VMX
2073 | X86_CPUID_FEATURE_ECX_SMX
2074 | X86_CPUID_FEATURE_ECX_EST
2075 | X86_CPUID_FEATURE_ECX_TM2
2076 | X86_CPUID_FEATURE_ECX_CNTXID
2077 | X86_CPUID_FEATURE_ECX_FMA
2078 | X86_CPUID_FEATURE_ECX_CX16
2079 | X86_CPUID_FEATURE_ECX_TPRUPDATE
2080 | X86_CPUID_FEATURE_ECX_PDCM
2081 | X86_CPUID_FEATURE_ECX_DCA
2082 | X86_CPUID_FEATURE_ECX_MOVBE
2083 | X86_CPUID_FEATURE_ECX_AES
2084 | X86_CPUID_FEATURE_ECX_POPCNT
2085 | X86_CPUID_FEATURE_ECX_XSAVE
2086 | X86_CPUID_FEATURE_ECX_OSXSAVE
2087 | X86_CPUID_FEATURE_ECX_AVX
2088 )));
2089 }
2090
2091 /* Cpuid 0x80000001:
2092 * Only report features we can support.
2093 *
2094 * Note! When enabling new features the Synthetic CPU and Portable CPUID
2095 * options may require adjusting (i.e. stripping what was enabled).
2096 *
2097 * ASSUMES that this is ALWAYS the AMD defined feature set if present.
2098 */
2099 PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
2100 UINT32_C(0x80000001), 0); /* Note! Must refetch when used later. */
2101 if (pExtFeatureLeaf)
2102 {
2103 pExtFeatureLeaf->uEdx &= X86_CPUID_AMD_FEATURE_EDX_FPU
2104 | X86_CPUID_AMD_FEATURE_EDX_VME
2105 | X86_CPUID_AMD_FEATURE_EDX_DE
2106 | X86_CPUID_AMD_FEATURE_EDX_PSE
2107 | X86_CPUID_AMD_FEATURE_EDX_TSC
2108 | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
2109 //| X86_CPUID_AMD_FEATURE_EDX_PAE - not implemented yet.
2110 //| X86_CPUID_AMD_FEATURE_EDX_MCE - not virtualized yet.
2111 | X86_CPUID_AMD_FEATURE_EDX_CX8
2112 //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
2113 /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see @bugref{1757}) */
2114 //| X86_CPUID_EXT_FEATURE_EDX_SEP
2115 | X86_CPUID_AMD_FEATURE_EDX_MTRR
2116 | X86_CPUID_AMD_FEATURE_EDX_PGE
2117 | X86_CPUID_AMD_FEATURE_EDX_MCA
2118 | X86_CPUID_AMD_FEATURE_EDX_CMOV
2119 | X86_CPUID_AMD_FEATURE_EDX_PAT
2120 | X86_CPUID_AMD_FEATURE_EDX_PSE36
2121 //| X86_CPUID_EXT_FEATURE_EDX_NX - not virtualized, requires PAE.
2122 //| X86_CPUID_AMD_FEATURE_EDX_AXMMX
2123 | X86_CPUID_AMD_FEATURE_EDX_MMX
2124 | X86_CPUID_AMD_FEATURE_EDX_FXSR
2125 | X86_CPUID_AMD_FEATURE_EDX_FFXSR
2126 //| X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
2127 | X86_CPUID_EXT_FEATURE_EDX_RDTSCP
2128 //| X86_CPUID_EXT_FEATURE_EDX_LONG_MODE - turned on when necessary
2129 | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
2130 | X86_CPUID_AMD_FEATURE_EDX_3DNOW
2131 | 0;
2132 pExtFeatureLeaf->uEcx &= 0
2133 //| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
2134 //| X86_CPUID_AMD_FEATURE_ECX_CMPL
2135 //| X86_CPUID_AMD_FEATURE_ECX_SVM - not virtualized.
2136 //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
2137 /* Note: This could prevent teleporting from AMD to Intel CPUs! */
2138 | X86_CPUID_AMD_FEATURE_ECX_CR8L /* expose lock mov cr0 = mov cr8 hack for guests that can use this feature to access the TPR. */
2139 //| X86_CPUID_AMD_FEATURE_ECX_ABM
2140 //| X86_CPUID_AMD_FEATURE_ECX_SSE4A
2141 //| X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
2142 //| X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
2143 //| X86_CPUID_AMD_FEATURE_ECX_OSVW
2144 //| X86_CPUID_AMD_FEATURE_ECX_IBS
2145 //| X86_CPUID_AMD_FEATURE_ECX_SSE5
2146 //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
2147 //| X86_CPUID_AMD_FEATURE_ECX_WDT
2148 | 0;
2149 if (pCPUM->u8PortableCpuIdLevel > 0)
2150 {
2151 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEcx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L);
2152 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
2153 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
2154 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
2155 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
2156 PORTABLE_DISABLE_FEATURE_BIT(2, pExtFeatureLeaf->uEcx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
2157 PORTABLE_DISABLE_FEATURE_BIT(3, pExtFeatureLeaf->uEcx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV);
2158
2159 Assert(!(pExtFeatureLeaf->uEcx & ( X86_CPUID_AMD_FEATURE_ECX_CMPL
2160 | X86_CPUID_AMD_FEATURE_ECX_SVM
2161 | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
2162 | X86_CPUID_AMD_FEATURE_ECX_CR8L
2163 | X86_CPUID_AMD_FEATURE_ECX_ABM
2164 | X86_CPUID_AMD_FEATURE_ECX_SSE4A
2165 | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
2166 | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
2167 | X86_CPUID_AMD_FEATURE_ECX_OSVW
2168 | X86_CPUID_AMD_FEATURE_ECX_IBS
2169 | X86_CPUID_AMD_FEATURE_ECX_SSE5
2170 | X86_CPUID_AMD_FEATURE_ECX_SKINIT
2171 | X86_CPUID_AMD_FEATURE_ECX_WDT
2172 | UINT32_C(0xffffc000)
2173 )));
2174 Assert(!(pExtFeatureLeaf->uEdx & ( RT_BIT(10)
2175 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
2176 | RT_BIT(18)
2177 | RT_BIT(19)
2178 | RT_BIT(21)
2179 | X86_CPUID_AMD_FEATURE_EDX_AXMMX
2180 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
2181 | RT_BIT(28)
2182 )));
2183 }
2184 }
2185
2186 /*
2187 * Hide HTT, multicode, SMP, whatever.
2188 * (APIC-ID := 0 and #LogCpus := 0)
2189 */
2190 pStdFeatureLeaf->uEbx &= 0x0000ffff;
2191#ifdef VBOX_WITH_MULTI_CORE
2192 if (pVM->cCpus > 1)
2193 {
2194 /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */
2195 pStdFeatureLeaf->uEbx |= (pVM->cCpus << 16);
2196 pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT; /* necessary for hyper-threading *or* multi-core CPUs */
2197 }
2198#endif
2199
2200 /* Cpuid 2:
2201 * Intel: Cache and TLB information
2202 * AMD: Reserved
2203 * VIA: Reserved
2204 * Safe to expose; restrict the number of calls to 1 for the portable case.
2205 */
2206 PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 2, 0);
2207 if ( pCPUM->u8PortableCpuIdLevel > 0
2208 && pCurLeaf
2209 && (pCurLeaf->uEax & 0xff) > 1)
2210 {
2211 LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff));
2212 pCurLeaf->uEax &= UINT32_C(0xfffffffe);
2213 }
2214
2215 /* Cpuid 3:
2216 * Intel: EAX, EBX - reserved (transmeta uses these)
2217 * ECX, EDX - Processor Serial Number if available, otherwise reserved
2218 * AMD: Reserved
2219 * VIA: Reserved
2220 * Safe to expose
2221 */
2222 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 3, 0);
2223 pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
2224 if ( !(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN)
2225 && pCurLeaf)
2226 {
2227 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
2228 if (pCPUM->u8PortableCpuIdLevel > 0)
2229 pCurLeaf->uEax = pCurLeaf->uEbx = 0;
2230 }
2231
2232 /* Cpuid 4:
2233 * Intel: Deterministic Cache Parameters Leaf
2234 * Note: Depends on the ECX input! -> Feeling rather lazy now, so we just return 0
2235 * AMD: Reserved
2236 * VIA: Reserved
2237 * Safe to expose, except for EAX:
2238 * Bits 25-14: Maximum number of addressable IDs for logical processors sharing this cache (see note)**
2239 * Bits 31-26: Maximum number of processor cores in this physical package**
2240 * Note: These SMP values are constant regardless of ECX
2241 */
2242 CPUMCPUIDLEAF NewLeaf;
2243 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0);
2244 if (pCurLeaf)
2245 {
2246 NewLeaf.uLeaf = 4;
2247 NewLeaf.uSubLeaf = 0;
2248 NewLeaf.fSubLeafMask = 0;
2249 NewLeaf.uEax = 0;
2250 NewLeaf.uEbx = 0;
2251 NewLeaf.uEcx = 0;
2252 NewLeaf.uEdx = 0;
2253 NewLeaf.fFlags = 0;
2254#ifdef VBOX_WITH_MULTI_CORE
2255 if ( pVM->cCpus > 1
2256 && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
2257 {
2258 AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
2259 /* One logical processor with possibly multiple cores. */
2260 /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
2261 NewLeaf.uEax |= ((pVM->cCpus - 1) << 26); /* 6 bits only -> 64 cores! */
2262 }
2263#endif
2264 rc = cpumR3CpuIdInsert(NULL /* pVM */, &pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
2265 AssertLogRelRCReturn(rc, rc);
2266 }
2267
2268 /* Cpuid 5: Monitor/mwait Leaf
2269 * Intel: ECX, EDX - reserved
2270 * EAX, EBX - Smallest and largest monitor line size
2271 * AMD: EDX - reserved
2272 * EAX, EBX - Smallest and largest monitor line size
2273 * ECX - extensions (ignored for now)
2274 * VIA: Reserved
2275 * Safe to expose
2276 */
2277 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 5, 0);
2278 if (pCurLeaf)
2279 {
2280 pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
2281 if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR))
2282 pCurLeaf->uEax = pCurLeaf->uEbx = 0;
2283
2284 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
2285 if (fMWaitExtensions)
2286 {
2287 pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
2288 /** @todo: for now we just expose host's MWAIT C-states, although conceptually
2289 it shall be part of our power management virtualization model */
2290#if 0
2291 /* MWAIT sub C-states */
2292 pCurLeaf->uEdx =
2293 (0 << 0) /* 0 in C0 */ |
2294 (2 << 4) /* 2 in C1 */ |
2295 (2 << 8) /* 2 in C2 */ |
2296 (2 << 12) /* 2 in C3 */ |
2297 (0 << 16) /* 0 in C4 */
2298 ;
2299#endif
2300 }
2301 else
2302 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
2303 }
2304
2305 /* Cpuid 0x800000005 & 0x800000006 contain information about L1, L2 & L3 cache and TLB identifiers.
2306 * Safe to pass on to the guest.
2307 *
2308 * Intel: 0x800000005 reserved
2309 * 0x800000006 L2 cache information
2310 * AMD: 0x800000005 L1 cache information
2311 * 0x800000006 L2/L3 cache information
2312 * VIA: 0x800000005 TLB and L1 cache information
2313 * 0x800000006 L2 cache information
2314 */
2315
2316 /* Cpuid 0x800000007:
2317 * Intel: Reserved
2318 * AMD: EAX, EBX, ECX - reserved
2319 * EDX: Advanced Power Management Information
2320 * VIA: Reserved
2321 */
2322 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000007), 0);
2323 if (pCurLeaf)
2324 {
2325 Assert(pCPUM->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID);
2326
2327 pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0;
2328
2329 if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
2330 {
2331 /* Only expose the TSC invariant capability bit to the guest. */
2332 pCurLeaf->uEdx &= 0
2333 //| X86_CPUID_AMD_ADVPOWER_EDX_TS
2334 //| X86_CPUID_AMD_ADVPOWER_EDX_FID
2335 //| X86_CPUID_AMD_ADVPOWER_EDX_VID
2336 //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
2337 //| X86_CPUID_AMD_ADVPOWER_EDX_TM
2338 //| X86_CPUID_AMD_ADVPOWER_EDX_STC
2339 //| X86_CPUID_AMD_ADVPOWER_EDX_MC
2340 //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
2341#if 0
2342 /*
2343 * We don't expose X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR, because newer
2344 * Linux kernels blindly assume that the AMD performance counters work
2345 * if this is set for 64 bits guests. (Can't really find a CPUID feature
2346 * bit for them though.)
2347 */
2348 | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
2349#endif
2350 | 0;
2351 }
2352 else
2353 pCurLeaf->uEdx = 0;
2354 }
2355
2356 /* Cpuid 0x800000008:
2357 * Intel: EAX: Virtual/Physical address Size
2358 * EBX, ECX, EDX - reserved
2359 * AMD: EBX, EDX - reserved
2360 * EAX: Virtual/Physical/Guest address Size
2361 * ECX: Number of cores + APICIdCoreIdSize
2362 * VIA: EAX: Virtual/Physical address Size
2363 * EBX, ECX, EDX - reserved
2364 */
2365 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000008), 0);
2366 if (pCurLeaf)
2367 {
2368 /* Only expose the virtual and physical address sizes to the guest. */
2369 pCurLeaf->uEax &= UINT32_C(0x0000ffff);
2370 pCurLeaf->uEbx = pCurLeaf->uEdx = 0; /* reserved */
2371 /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu)
2372 * NC (0-7) Number of cores; 0 equals 1 core */
2373 pCurLeaf->uEcx = 0;
2374#ifdef VBOX_WITH_MULTI_CORE
2375 if ( pVM->cCpus > 1
2376 && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
2377 {
2378 /* Legacy method to determine the number of cores. */
2379 pCurLeaf->uEcx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
2380 pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
2381 UINT32_C(0x80000001), 0);
2382 if (pExtFeatureLeaf)
2383 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
2384 }
2385#endif
2386 }
2387
2388
2389 /*
2390 * Limit it the number of entries, zapping the remainder.
2391 *
2392 * The limits are masking off stuff about power saving and similar, this
2393 * is perhaps a bit crudely done as there is probably some relatively harmless
2394 * info too in these leaves (like words about having a constant TSC).
2395 */
2396 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0);
2397 if (pCurLeaf)
2398 {
2399 if (pCurLeaf->uEax > 5)
2400 {
2401 pCurLeaf->uEax = 5;
2402 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
2403 pCurLeaf->uEax + 1, UINT32_C(0x000fffff));
2404 }
2405
2406 /* NT4 hack, no zapping of extra leaves here. */
2407 if (fNt4LeafLimit && pCurLeaf->uEax > 3)
2408 pCurLeaf->uEax = 3;
2409 }
2410
2411 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000000), 0);
2412 if (pCurLeaf)
2413 {
2414 if (pCurLeaf->uEax > UINT32_C(0x80000008))
2415 {
2416 pCurLeaf->uEax = UINT32_C(0x80000008);
2417 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
2418 pCurLeaf->uEax + 1, UINT32_C(0x800fffff));
2419 }
2420 }
2421
2422 /*
2423 * Centaur stuff (VIA).
2424 *
2425 * The important part here (we think) is to make sure the 0xc0000000
2426 * function returns 0xc0000001. As for the features, we don't currently
2427 * let on about any of those... 0xc0000002 seems to be some
2428 * temperature/hz/++ stuff, include it as well (static).
2429 */
2430 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0xc0000000), 0);
2431 if (pCurLeaf)
2432 {
2433 if ( pCurLeaf->uEax >= UINT32_C(0xc0000000)
2434 && pCurLeaf->uEax <= UINT32_C(0xc0000004))
2435 {
2436 pCurLeaf->uEax = RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000002));
2437 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
2438 UINT32_C(0xc0000002), UINT32_C(0xc00fffff));
2439
2440 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
2441 UINT32_C(0xc0000001), 0);
2442 if (pCurLeaf)
2443 pCurLeaf->uEdx = 0; /* all features hidden */
2444 }
2445 else
2446 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
2447 UINT32_C(0xc0000000), UINT32_C(0xc00fffff));
2448 }
2449
2450 /*
2451 * Hypervisor identification.
2452 *
2453 * We only return minimal information, primarily ensuring that the
2454 * 0x40000000 function returns 0x40000001 and identifying ourselves.
2455 * Hypervisor-specific interface is supported through GIM which will
2456 * modify these leaves if required depending on the GIM provider.
2457 */
2458 NewLeaf.uLeaf = UINT32_C(0x40000000);
2459 NewLeaf.uSubLeaf = 0;
2460 NewLeaf.fSubLeafMask = 0;
2461 NewLeaf.uEax = UINT32_C(0x40000001);
2462 NewLeaf.uEbx = 0x786f4256 /* 'VBox' */;
2463 NewLeaf.uEcx = 0x786f4256 /* 'VBox' */;
2464 NewLeaf.uEdx = 0x786f4256 /* 'VBox' */;
2465 NewLeaf.fFlags = 0;
2466 rc = cpumR3CpuIdInsert(NULL /* pVM */, &pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
2467 AssertLogRelRCReturn(rc, rc);
2468
2469 NewLeaf.uLeaf = UINT32_C(0x40000001);
2470 NewLeaf.uEax = 0x656e6f6e; /* 'none' */
2471 NewLeaf.uEbx = 0;
2472 NewLeaf.uEcx = 0;
2473 NewLeaf.uEdx = 0;
2474 NewLeaf.fFlags = 0;
2475 rc = cpumR3CpuIdInsert(NULL /* pVM */, &pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
2476 AssertLogRelRCReturn(rc, rc);
2477
2478 /*
2479 * Mini CPU selection support for making Mac OS X happy.
2480 */
2481 if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
2482 {
2483 pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
2484 uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pStdFeatureLeaf->uEax),
2485 ASMGetCpuModelIntel(pStdFeatureLeaf->uEax),
2486 ASMGetCpuFamily(pStdFeatureLeaf->uEax),
2487 0);
2488 if (uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep)
2489 {
2490 uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000);
2491 uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf; /* stepping */
2492 uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4; /* 4 low model bits */
2493 uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) >> 4) << 16; /* 4 high model bits */
2494 uNew |= (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf) << 8; /* 4 low family bits */
2495 if (RT_BYTE3(uMaxIntelFamilyModelStep) > 0xf) /* 8 high family bits, using intel's suggested calculation. */
2496 uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20;
2497 LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x\n",
2498 pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
2499 pStdFeatureLeaf->uEax = uNew;
2500 }
2501 }
2502
2503 /*
2504 * MSR fudging.
2505 */
2506 /** @cfgm{/CPUM/FudgeMSRs, boolean, true}
2507 * Fudges some common MSRs if not present in the selected CPU database entry.
2508 * This is for trying to keep VMs running when moved between different hosts
2509 * and different CPU vendors. */
2510 bool fEnable;
2511 rc = CFGMR3QueryBoolDef(pCpumCfg, "FudgeMSRs", &fEnable, true); AssertRCReturn(rc, rc);
2512 if (fEnable)
2513 {
2514 rc = cpumR3MsrApplyFudge(pVM);
2515 AssertLogRelRCReturn(rc, rc);
2516 }
2517
2518 /*
2519 * Move the MSR and CPUID arrays over on the hypervisor heap, and explode
2520 * guest CPU features again.
2521 */
2522 void *pvFree = pCPUM->GuestInfo.paCpuIdLeavesR3;
2523 int rc1 = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCPUM, pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves);
2524 RTMemFree(pvFree);
2525
2526 pvFree = pCPUM->GuestInfo.paMsrRangesR3;
2527 int rc2 = MMHyperDupMem(pVM, pvFree,
2528 sizeof(pCPUM->GuestInfo.paMsrRangesR3[0]) * pCPUM->GuestInfo.cMsrRanges, 32,
2529 MM_TAG_CPUM_MSRS, (void **)&pCPUM->GuestInfo.paMsrRangesR3);
2530 RTMemFree(pvFree);
2531 AssertLogRelRCReturn(rc1, rc1);
2532 AssertLogRelRCReturn(rc2, rc2);
2533
2534 pCPUM->GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paMsrRangesR3);
2535 pCPUM->GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paMsrRangesR3);
2536 cpumR3MsrRegStats(pVM);
2537
2538 /*
2539 * Some more configuration that we're applying at the end of everything
2540 * via the CPUMSetGuestCpuIdFeature API.
2541 */
2542
2543 /* Check if PAE was explicitely enabled by the user. */
2544 rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false); AssertRCReturn(rc, rc);
2545 if (fEnable)
2546 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
2547
2548 /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
2549 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false); AssertRCReturn(rc, rc);
2550 if (fEnable)
2551 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
2552
2553 /* We don't enable the Hypervisor Present bit by default, but it may be needed by some guests. */
2554 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableHVP", &fEnable, false); AssertRCReturn(rc, rc);
2555 if (fEnable)
2556 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);
2557
2558#undef PORTABLE_DISABLE_FEATURE_BIT
2559#undef PORTABLE_CLEAR_BITS_WHEN
2560
2561 return VINF_SUCCESS;
2562}
2563
2564
2565
2566/*
2567 *
2568 *
2569 * Saved state related code.
2570 * Saved state related code.
2571 * Saved state related code.
2572 *
2573 *
2574 */
2575
2576/**
2577 * Called both in pass 0 and the final pass.
2578 *
2579 * @param pVM Pointer to the VM.
2580 * @param pSSM The saved state handle.
2581 */
2582void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM)
2583{
2584 /*
2585 * Save all the CPU ID leaves here so we can check them for compatibility
2586 * upon loading.
2587 */
2588 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd));
2589 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));
2590
2591 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt));
2592 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
2593
2594 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur));
2595 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
2596
2597 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
2598
2599 /*
2600 * Save a good portion of the raw CPU IDs as well as they may come in
2601 * handy when validating features for raw mode.
2602 */
2603 CPUMCPUID aRawStd[16];
2604 for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
2605 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
2606 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
2607 SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
2608
2609 CPUMCPUID aRawExt[32];
2610 for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
2611 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
2612 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
2613 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
2614}
2615
2616
2617static int cpumR3LoadCpuIdOneGuestArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
2618{
2619 uint32_t cCpuIds;
2620 int rc = SSMR3GetU32(pSSM, &cCpuIds);
2621 if (RT_SUCCESS(rc))
2622 {
2623 if (cCpuIds < 64)
2624 {
2625 for (uint32_t i = 0; i < cCpuIds; i++)
2626 {
2627 CPUMCPUID CpuId;
2628 rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId));
2629 if (RT_FAILURE(rc))
2630 break;
2631
2632 CPUMCPUIDLEAF NewLeaf;
2633 NewLeaf.uLeaf = uBase + i;
2634 NewLeaf.uSubLeaf = 0;
2635 NewLeaf.fSubLeafMask = 0;
2636 NewLeaf.uEax = CpuId.eax;
2637 NewLeaf.uEbx = CpuId.ebx;
2638 NewLeaf.uEcx = CpuId.ecx;
2639 NewLeaf.uEdx = CpuId.edx;
2640 NewLeaf.fFlags = 0;
2641 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &NewLeaf);
2642 }
2643 }
2644 else
2645 rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2646 }
2647 if (RT_FAILURE(rc))
2648 {
2649 RTMemFree(*ppaLeaves);
2650 *ppaLeaves = NULL;
2651 *pcLeaves = 0;
2652 }
2653 return rc;
2654}
2655
2656
2657static int cpumR3LoadCpuIdGuestArrays(PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
2658{
2659 *ppaLeaves = NULL;
2660 *pcLeaves = 0;
2661
2662 int rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves);
2663 if (RT_SUCCESS(rc))
2664 rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves);
2665 if (RT_SUCCESS(rc))
2666 rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves);
2667
2668 return rc;
2669}
2670
2671
2672/**
2673 * Loads the CPU ID leaves saved by pass 0.
2674 *
2675 * @returns VBox status code.
2676 * @param pVM Pointer to the VM.
2677 * @param pSSM The saved state handle.
2678 * @param uVersion The format version.
2679 */
2680int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2681{
2682 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
2683
2684 /*
2685 * Define a bunch of macros for simplifying the code.
2686 */
2687 /* Generic expression + failure message. */
2688#define CPUID_CHECK_RET(expr, fmt) \
2689 do { \
2690 if (!(expr)) \
2691 { \
2692 char *pszMsg = RTStrAPrintf2 fmt; /* lack of variadic macros sucks */ \
2693 if (fStrictCpuIdChecks) \
2694 { \
2695 int rcCpuid = SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, "%s", pszMsg); \
2696 RTStrFree(pszMsg); \
2697 return rcCpuid; \
2698 } \
2699 LogRel(("CPUM: %s\n", pszMsg)); \
2700 RTStrFree(pszMsg); \
2701 } \
2702 } while (0)
2703#define CPUID_CHECK_WRN(expr, fmt) \
2704 do { \
2705 if (!(expr)) \
2706 LogRel(fmt); \
2707 } while (0)
2708
2709 /* For comparing two values and bitch if they differs. */
2710#define CPUID_CHECK2_RET(what, host, saved) \
2711 do { \
2712 if ((host) != (saved)) \
2713 { \
2714 if (fStrictCpuIdChecks) \
2715 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
2716 N_(#what " mismatch: host=%#x saved=%#x"), (host), (saved)); \
2717 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
2718 } \
2719 } while (0)
2720#define CPUID_CHECK2_WRN(what, host, saved) \
2721 do { \
2722 if ((host) != (saved)) \
2723 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
2724 } while (0)
2725
2726 /* For checking raw cpu features (raw mode). */
2727#define CPUID_RAW_FEATURE_RET(set, reg, bit) \
2728 do { \
2729 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
2730 { \
2731 if (fStrictCpuIdChecks) \
2732 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
2733 N_(#bit " mismatch: host=%d saved=%d"), \
2734 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) ); \
2735 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
2736 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
2737 } \
2738 } while (0)
2739#define CPUID_RAW_FEATURE_WRN(set, reg, bit) \
2740 do { \
2741 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
2742 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
2743 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
2744 } while (0)
2745#define CPUID_RAW_FEATURE_IGN(set, reg, bit) do { } while (0)
2746
2747 /* For checking guest features. */
2748#define CPUID_GST_FEATURE_RET(set, reg, bit) \
2749 do { \
2750 if ( (aGuestCpuId##set [1].reg & bit) \
2751 && !(aHostRaw##set [1].reg & bit) \
2752 && !(aHostOverride##set [1].reg & bit) \
2753 ) \
2754 { \
2755 if (fStrictCpuIdChecks) \
2756 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
2757 N_(#bit " is not supported by the host but has already exposed to the guest")); \
2758 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
2759 } \
2760 } while (0)
2761#define CPUID_GST_FEATURE_WRN(set, reg, bit) \
2762 do { \
2763 if ( (aGuestCpuId##set [1].reg & bit) \
2764 && !(aHostRaw##set [1].reg & bit) \
2765 && !(aHostOverride##set [1].reg & bit) \
2766 ) \
2767 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
2768 } while (0)
2769#define CPUID_GST_FEATURE_EMU(set, reg, bit) \
2770 do { \
2771 if ( (aGuestCpuId##set [1].reg & bit) \
2772 && !(aHostRaw##set [1].reg & bit) \
2773 && !(aHostOverride##set [1].reg & bit) \
2774 ) \
2775 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
2776 } while (0)
2777#define CPUID_GST_FEATURE_IGN(set, reg, bit) do { } while (0)
2778
2779 /* For checking guest features if AMD guest CPU. */
2780#define CPUID_GST_AMD_FEATURE_RET(set, reg, bit) \
2781 do { \
2782 if ( (aGuestCpuId##set [1].reg & bit) \
2783 && fGuestAmd \
2784 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
2785 && !(aHostOverride##set [1].reg & bit) \
2786 ) \
2787 { \
2788 if (fStrictCpuIdChecks) \
2789 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
2790 N_(#bit " is not supported by the host but has already exposed to the guest")); \
2791 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
2792 } \
2793 } while (0)
2794#define CPUID_GST_AMD_FEATURE_WRN(set, reg, bit) \
2795 do { \
2796 if ( (aGuestCpuId##set [1].reg & bit) \
2797 && fGuestAmd \
2798 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
2799 && !(aHostOverride##set [1].reg & bit) \
2800 ) \
2801 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
2802 } while (0)
2803#define CPUID_GST_AMD_FEATURE_EMU(set, reg, bit) \
2804 do { \
2805 if ( (aGuestCpuId##set [1].reg & bit) \
2806 && fGuestAmd \
2807 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
2808 && !(aHostOverride##set [1].reg & bit) \
2809 ) \
2810 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
2811 } while (0)
2812#define CPUID_GST_AMD_FEATURE_IGN(set, reg, bit) do { } while (0)
2813
2814 /* For checking AMD features which have a corresponding bit in the standard
2815 range. (Intel defines very few bits in the extended feature sets.) */
2816#define CPUID_GST_FEATURE2_RET(reg, ExtBit, StdBit) \
2817 do { \
2818 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
2819 && !(fHostAmd \
2820 ? aHostRawExt[1].reg & (ExtBit) \
2821 : aHostRawStd[1].reg & (StdBit)) \
2822 && !(aHostOverrideExt[1].reg & (ExtBit)) \
2823 ) \
2824 { \
2825 if (fStrictCpuIdChecks) \
2826 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
2827 N_(#ExtBit " is not supported by the host but has already exposed to the guest")); \
2828 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \
2829 } \
2830 } while (0)
2831#define CPUID_GST_FEATURE2_WRN(reg, ExtBit, StdBit) \
2832 do { \
2833 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
2834 && !(fHostAmd \
2835 ? aHostRawExt[1].reg & (ExtBit) \
2836 : aHostRawStd[1].reg & (StdBit)) \
2837 && !(aHostOverrideExt[1].reg & (ExtBit)) \
2838 ) \
2839 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \
2840 } while (0)
2841#define CPUID_GST_FEATURE2_EMU(reg, ExtBit, StdBit) \
2842 do { \
2843 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
2844 && !(fHostAmd \
2845 ? aHostRawExt[1].reg & (ExtBit) \
2846 : aHostRawStd[1].reg & (StdBit)) \
2847 && !(aHostOverrideExt[1].reg & (ExtBit)) \
2848 ) \
2849 LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
2850 } while (0)
2851#define CPUID_GST_FEATURE2_IGN(reg, ExtBit, StdBit) do { } while (0)
2852
2853 /*
2854 * Load them into stack buffers first.
2855 */
2856 PCPUMCPUIDLEAF paLeaves;
2857 uint32_t cLeaves;
2858 int rc = cpumR3LoadCpuIdGuestArrays(pSSM, uVersion, &paLeaves, &cLeaves);
2859 AssertRCReturn(rc, rc);
2860
2861 /** @todo we'll be leaking paLeaves on error return... */
2862
2863 CPUMCPUID GuestCpuIdDef;
2864 rc = SSMR3GetMem(pSSM, &GuestCpuIdDef, sizeof(GuestCpuIdDef));
2865 AssertRCReturn(rc, rc);
2866
2867 CPUMCPUID aRawStd[16];
2868 uint32_t cRawStd;
2869 rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc);
2870 if (cRawStd > RT_ELEMENTS(aRawStd))
2871 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2872 rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
2873 AssertRCReturn(rc, rc);
2874 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
2875 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
2876
2877 CPUMCPUID aRawExt[32];
2878 uint32_t cRawExt;
2879 rc = SSMR3GetU32(pSSM, &cRawExt); AssertRCReturn(rc, rc);
2880 if (cRawExt > RT_ELEMENTS(aRawExt))
2881 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2882 rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
2883 AssertRCReturn(rc, rc);
2884 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
2885 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
2886
2887 /*
2888 * Get the raw CPU IDs for the current host.
2889 */
2890 CPUMCPUID aHostRawStd[16];
2891 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
2892 ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx);
2893
2894 CPUMCPUID aHostRawExt[32];
2895 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
2896 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0,
2897 &aHostRawExt[i].eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx);
2898
2899 /*
2900 * Get the host and guest overrides so we don't reject the state because
2901 * some feature was enabled thru these interfaces.
2902 * Note! We currently only need the feature leaves, so skip rest.
2903 */
2904 PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
2905 CPUMCPUID aHostOverrideStd[2];
2906 memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
2907 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aHostOverrideStd[0], RT_ELEMENTS(aHostOverrideStd), pOverrideCfg);
2908
2909 CPUMCPUID aHostOverrideExt[2];
2910 memcpy(&aHostOverrideExt[0], &aHostRawExt[0], sizeof(aHostOverrideExt));
2911 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aHostOverrideExt[0], RT_ELEMENTS(aHostOverrideExt), pOverrideCfg);
2912
2913 /*
2914 * This can be skipped.
2915 */
2916 bool fStrictCpuIdChecks;
2917 CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"), "StrictCpuIdChecks", &fStrictCpuIdChecks, true);
2918
2919
2920
2921 /*
2922 * For raw-mode we'll require that the CPUs are very similar since we don't
2923 * intercept CPUID instructions for user mode applications.
2924 */
2925 if (!HMIsEnabled(pVM))
2926 {
2927 /* CPUID(0) */
2928 CPUID_CHECK_RET( aHostRawStd[0].ebx == aRawStd[0].ebx
2929 && aHostRawStd[0].ecx == aRawStd[0].ecx
2930 && aHostRawStd[0].edx == aRawStd[0].edx,
2931 (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
2932 &aHostRawStd[0].ebx, &aHostRawStd[0].edx, &aHostRawStd[0].ecx,
2933 &aRawStd[0].ebx, &aRawStd[0].edx, &aRawStd[0].ecx));
2934 CPUID_CHECK2_WRN("Std CPUID max leaf", aHostRawStd[0].eax, aRawStd[0].eax);
2935 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3);
2936 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1].eax >> 28, aRawExt[1].eax >> 28);
2937
2938 bool const fIntel = ASMIsIntelCpuEx(aRawStd[0].ebx, aRawStd[0].ecx, aRawStd[0].edx);
2939
2940 /* CPUID(1).eax */
2941 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawStd[1].eax), ASMGetCpuFamily(aRawStd[1].eax));
2942 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawStd[1].eax, fIntel), ASMGetCpuModel(aRawStd[1].eax, fIntel));
2943 CPUID_CHECK2_WRN("CPU type", (aHostRawStd[1].eax >> 12) & 3, (aRawStd[1].eax >> 12) & 3 );
2944
2945 /* CPUID(1).ebx - completely ignore CPU count and APIC ID. */
2946 CPUID_CHECK2_RET("CPU brand ID", aHostRawStd[1].ebx & 0xff, aRawStd[1].ebx & 0xff);
2947 CPUID_CHECK2_WRN("CLFLUSH chunk count", (aHostRawStd[1].ebx >> 8) & 0xff, (aRawStd[1].ebx >> 8) & 0xff);
2948
2949 /* CPUID(1).ecx */
2950 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);
2951 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL);
2952 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64);
2953 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
2954 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS);
2955 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_VMX);
2956 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_SMX);
2957 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_EST);
2958 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TM2);
2959 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3);
2960 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID);
2961 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
2962 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA);
2963 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16);
2964 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);
2965 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM);
2966 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
2967 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(17) /*reserved*/);
2968 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DCA);
2969 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1);
2970 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2);
2971 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
2972 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE);
2973 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT);
2974 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(24) /*reserved*/);
2975 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES);
2976 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE);
2977 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE);
2978 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX);
2979 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(29) /*reserved*/);
2980 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(30) /*reserved*/);
2981 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_HVP);
2982
2983 /* CPUID(1).edx */
2984 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
2985 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
2986 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE);
2987 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
2988 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC);
2989 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR);
2990 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
2991 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
2992 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8);
2993 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
2994 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
2995 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
2996 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
2997 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
2998 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
2999 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV);
3000 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
3001 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
3002 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
3003 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH);
3004 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
3005 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_DS);
3006 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_ACPI);
3007 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX);
3008 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR);
3009 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE);
3010 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2);
3011 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SS);
3012 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_HTT);
3013 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_TM);
3014 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/);
3015 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PBE);
3016
3017 /* CPUID(2) - config, mostly about caches. ignore. */
3018 /* CPUID(3) - processor serial number. ignore. */
3019 /* CPUID(4) - config, cache and topology - takes ECX as input. ignore. */
3020 /* CPUID(5) - mwait/monitor config. ignore. */
3021 /* CPUID(6) - power management. ignore. */
3022 /* CPUID(7) - ???. ignore. */
3023 /* CPUID(8) - ???. ignore. */
3024 /* CPUID(9) - DCA. ignore for now. */
3025 /* CPUID(a) - PeMo info. ignore for now. */
3026 /* CPUID(b) - topology info - takes ECX as input. ignore. */
3027
3028 /* CPUID(d) - XCR0 stuff - takes ECX as input. We only warn about the main level (ECX=0) for now. */
3029 CPUID_CHECK_WRN( aRawStd[0].eax < UINT32_C(0x0000000d)
3030 || aHostRawStd[0].eax >= UINT32_C(0x0000000d),
3031 ("CPUM: Standard leaf D was present on saved state host, not present on current.\n"));
3032 if ( aRawStd[0].eax >= UINT32_C(0x0000000d)
3033 && aHostRawStd[0].eax >= UINT32_C(0x0000000d))
3034 {
3035 CPUID_CHECK2_WRN("Valid low XCR0 bits", aHostRawStd[0xd].eax, aRawStd[0xd].eax);
3036 CPUID_CHECK2_WRN("Valid high XCR0 bits", aHostRawStd[0xd].edx, aRawStd[0xd].edx);
3037 CPUID_CHECK2_WRN("Current XSAVE/XRSTOR area size", aHostRawStd[0xd].ebx, aRawStd[0xd].ebx);
3038 CPUID_CHECK2_WRN("Max XSAVE/XRSTOR area size", aHostRawStd[0xd].ecx, aRawStd[0xd].ecx);
3039 }
3040
3041 /* CPUID(0x80000000) - same as CPUID(0) except for eax.
3042 Note! Intel have/is marking many of the fields here as reserved. We
3043 will verify them as if it's an AMD CPU. */
3044 CPUID_CHECK_RET( (aHostRawExt[0].eax >= UINT32_C(0x80000001) && aHostRawExt[0].eax <= UINT32_C(0x8000007f))
3045 || !(aRawExt[0].eax >= UINT32_C(0x80000001) && aRawExt[0].eax <= UINT32_C(0x8000007f)),
3046 (N_("Extended leaves was present on saved state host, but is missing on the current\n")));
3047 if (aRawExt[0].eax >= UINT32_C(0x80000001) && aRawExt[0].eax <= UINT32_C(0x8000007f))
3048 {
3049 CPUID_CHECK_RET( aHostRawExt[0].ebx == aRawExt[0].ebx
3050 && aHostRawExt[0].ecx == aRawExt[0].ecx
3051 && aHostRawExt[0].edx == aRawExt[0].edx,
3052 (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
3053 &aHostRawExt[0].ebx, &aHostRawExt[0].edx, &aHostRawExt[0].ecx,
3054 &aRawExt[0].ebx, &aRawExt[0].edx, &aRawExt[0].ecx));
3055 CPUID_CHECK2_WRN("Ext CPUID max leaf", aHostRawExt[0].eax, aRawExt[0].eax);
3056
3057 /* CPUID(0x80000001).eax - same as CPUID(0).eax. */
3058 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawExt[1].eax), ASMGetCpuFamily(aRawExt[1].eax));
3059 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawExt[1].eax, fIntel), ASMGetCpuModel(aRawExt[1].eax, fIntel));
3060 CPUID_CHECK2_WRN("CPU type", (aHostRawExt[1].eax >> 12) & 3, (aRawExt[1].eax >> 12) & 3 );
3061 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3 );
3062 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1].eax >> 28, aRawExt[1].eax >> 28);
3063
3064 /* CPUID(0x80000001).ebx - Brand ID (maybe), just warn if things differs. */
3065 CPUID_CHECK2_WRN("CPU BrandID", aHostRawExt[1].ebx & 0xffff, aRawExt[1].ebx & 0xffff);
3066 CPUID_CHECK2_WRN("Reserved bits 16:27", (aHostRawExt[1].ebx >> 16) & 0xfff, (aRawExt[1].ebx >> 16) & 0xfff);
3067 CPUID_CHECK2_WRN("PkgType", (aHostRawExt[1].ebx >> 28) & 0xf, (aRawExt[1].ebx >> 28) & 0xf);
3068
3069 /* CPUID(0x80000001).ecx */
3070 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
3071 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);
3072 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);
3073 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);
3074 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L);
3075 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM);
3076 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);
3077 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);
3078 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);
3079 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW);
3080 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS);
3081 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5);
3082 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);
3083 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT);
3084 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
3085 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
3086 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
3087 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
3088 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
3089 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
3090 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
3091 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
3092 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
3093 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
3094 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
3095 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
3096 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
3097 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
3098 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
3099 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
3100 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
3101 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));
3102
3103 /* CPUID(0x80000001).edx */
3104 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FPU);
3105 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_VME);
3106 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_DE);
3107 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE);
3108 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_TSC);
3109 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MSR);
3110 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAE);
3111 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCE);
3112 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CX8);
3113 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_APIC);
3114 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(10) /*reserved*/);
3115 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SEP);
3116 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MTRR);
3117 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PGE);
3118 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCA);
3119 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CMOV);
3120 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAT);
3121 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE36);
3122 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(18) /*reserved*/);
3123 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(19) /*reserved*/);
3124 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
3125 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(21) /*reserved*/);
3126 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
3127 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MMX);
3128 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FXSR);
3129 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
3130 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
3131 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
3132 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(28) /*reserved*/);
3133 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
3134 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
3135 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
3136
3137 /** @todo verify the rest as well. */
3138 }
3139 }
3140
3141
3142
3143 /*
3144 * Verify that we can support the features already exposed to the guest on
3145 * this host.
3146 *
3147 * Most of the features we're emulating requires intercepting instruction
3148 * and doing it the slow way, so there is no need to warn when they aren't
3149 * present in the host CPU. Thus we use IGN instead of EMU on these.
3150 *
3151 * Trailing comments:
3152 * "EMU" - Possible to emulate, could be lots of work and very slow.
3153 * "EMU?" - Can this be emulated?
3154 */
3155 CPUMCPUID aGuestCpuIdStd[2];
3156 RT_ZERO(aGuestCpuIdStd);
3157 cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]);
3158
3159 /* CPUID(1).ecx */
3160 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU
3161 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL); // -> EMU?
3162 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64); // -> EMU?
3163 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
3164 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS); // -> EMU?
3165 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_VMX); // -> EMU
3166 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SMX); // -> EMU
3167 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_EST); // -> EMU
3168 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TM2); // -> EMU?
3169 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3); // -> EMU
3170 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID); // -> EMU
3171 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
3172 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA); // -> EMU? what's this?
3173 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16); // -> EMU?
3174 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
3175 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM); // -> EMU
3176 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
3177 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(17) /*reserved*/);
3178 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DCA); // -> EMU?
3179 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1); // -> EMU
3180 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2); // -> EMU
3181 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
3182 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE); // -> EMU
3183 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT); // -> EMU
3184 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(24) /*reserved*/);
3185 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES); // -> EMU
3186 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE); // -> EMU
3187 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU
3188 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX); // -> EMU?
3189 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(29) /*reserved*/);
3190 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(30) /*reserved*/);
3191 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_HVP); // Normally not set by host
3192
3193 /* CPUID(1).edx */
3194 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
3195 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
3196 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE); // -> EMU?
3197 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
3198 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
3199 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
3200 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
3201 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
3202 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
3203 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
3204 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
3205 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
3206 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
3207 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
3208 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
3209 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
3210 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
3211 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
3212 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
3213 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH); // -> EMU
3214 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
3215 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DS); // -> EMU?
3216 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_ACPI); // -> EMU?
3217 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
3218 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
3219 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE); // -> EMU
3220 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2); // -> EMU
3221 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SS); // -> EMU?
3222 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_HTT); // -> EMU?
3223 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TM); // -> EMU?
3224 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/); // -> EMU
3225 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PBE); // -> EMU?
3226
3227 /* CPUID(0x80000000). */
3228 CPUMCPUID aGuestCpuIdExt[2];
3229 RT_ZERO(aGuestCpuIdExt);
3230 if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1]))
3231 {
3232 /** @todo deal with no 0x80000001 on the host. */
3233 bool const fHostAmd = ASMIsAmdCpuEx(aHostRawStd[0].ebx, aHostRawStd[0].ecx, aHostRawStd[0].edx);
3234 bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].ebx, aGuestCpuIdExt[0].ecx, aGuestCpuIdExt[0].edx);
3235
3236 /* CPUID(0x80000001).ecx */
3237 CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); // -> EMU
3238 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL); // -> EMU
3239 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM); // -> EMU
3240 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???
3241 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L); // -> EMU
3242 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM); // -> EMU
3243 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); // -> EMU
3244 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
3245 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU
3246 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW); // -> EMU?
3247 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS); // -> EMU
3248 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5); // -> EMU
3249 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); // -> EMU
3250 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT); // -> EMU
3251 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
3252 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
3253 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
3254 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
3255 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
3256 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
3257 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
3258 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
3259 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
3260 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
3261 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
3262 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
3263 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
3264 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
3265 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
3266 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
3267 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
3268 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));
3269
3270 /* CPUID(0x80000001).edx */
3271 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_FPU, X86_CPUID_FEATURE_EDX_FPU); // -> EMU
3272 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_VME, X86_CPUID_FEATURE_EDX_VME); // -> EMU
3273 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_DE, X86_CPUID_FEATURE_EDX_DE); // -> EMU
3274 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PSE, X86_CPUID_FEATURE_EDX_PSE);
3275 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_TSC, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
3276 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_MSR, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
3277 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_PAE, X86_CPUID_FEATURE_EDX_PAE);
3278 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MCE, X86_CPUID_FEATURE_EDX_MCE);
3279 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_CX8, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
3280 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_APIC, X86_CPUID_FEATURE_EDX_APIC);
3281 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(10) /*reserved*/);
3282 CPUID_GST_FEATURE_IGN( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL); // On Intel: long mode only.
3283 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MTRR, X86_CPUID_FEATURE_EDX_MTRR);
3284 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PGE, X86_CPUID_FEATURE_EDX_PGE);
3285 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MCA, X86_CPUID_FEATURE_EDX_MCA);
3286 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_CMOV, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
3287 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PAT, X86_CPUID_FEATURE_EDX_PAT);
3288 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
3289 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(18) /*reserved*/);
3290 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(19) /*reserved*/);
3291 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
3292 CPUID_GST_FEATURE_WRN( Ext, edx, RT_BIT_32(21) /*reserved*/);
3293 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
3294 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_MMX, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
3295 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_FXSR, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
3296 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
3297 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
3298 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
3299 CPUID_GST_FEATURE_IGN( Ext, edx, RT_BIT_32(28) /*reserved*/);
3300 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
3301 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
3302 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
3303 }
3304
3305 /*
3306 * We're good, commit the CPU ID leaves.
3307 */
3308 MMHyperFree(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
3309 pVM->cpum.s.GuestInfo.paCpuIdLeavesR0 = NIL_RTR0PTR;
3310 pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR;
3311 pVM->cpum.s.GuestInfo.DefCpuId = GuestCpuIdDef;
3312 rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves);
3313 RTMemFree(paLeaves);
3314 AssertLogRelRCReturn(rc, rc);
3315
3316
3317#undef CPUID_CHECK_RET
3318#undef CPUID_CHECK_WRN
3319#undef CPUID_CHECK2_RET
3320#undef CPUID_CHECK2_WRN
3321#undef CPUID_RAW_FEATURE_RET
3322#undef CPUID_RAW_FEATURE_WRN
3323#undef CPUID_RAW_FEATURE_IGN
3324#undef CPUID_GST_FEATURE_RET
3325#undef CPUID_GST_FEATURE_WRN
3326#undef CPUID_GST_FEATURE_EMU
3327#undef CPUID_GST_FEATURE_IGN
3328#undef CPUID_GST_FEATURE2_RET
3329#undef CPUID_GST_FEATURE2_WRN
3330#undef CPUID_GST_FEATURE2_EMU
3331#undef CPUID_GST_FEATURE2_IGN
3332#undef CPUID_GST_AMD_FEATURE_RET
3333#undef CPUID_GST_AMD_FEATURE_WRN
3334#undef CPUID_GST_AMD_FEATURE_EMU
3335#undef CPUID_GST_AMD_FEATURE_IGN
3336
3337 return VINF_SUCCESS;
3338}
3339
3340#endif /* VBOX_IN_VMM */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette