VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUM.cpp@ 41933

Last change on this file since 41933 was 41931, checked in by vboxsync, 13 years ago

TRPM: Save state directly to the CPUMCPU context member instead of putting it on the stack. This avoids copying the state around before returning to host context to service an IRQ, or before using IEM.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 201.5 KB
/* $Id: CPUM.cpp 41931 2012-06-27 16:12:16Z vboxsync $ */
/** @file
 * CPUM - CPU Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_cpum CPUM - CPU Monitor / Manager
 *
 * The CPU Monitor / Manager keeps track of all the CPU registers. It is
 * also responsible for lazy FPU handling and some of the context loading
 * in raw mode.
 *
 * There are three CPU contexts: the most important one is the guest one (GC).
 * When running in raw-mode (RC) there is a special hyper context for the VMM
 * part that floats around inside the guest address space. When running in
 * raw-mode, CPUM also maintains a host context for saving and restoring
 * registers across world switches. This latter is done in cooperation with the
 * world switcher (@see pg_vmm).
 *
 * @see grp_cpum
 */
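
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * ring-3 code reaches the guest context through the per-VCPU accessor used
 * further down in this file:
 */
#if 0
static void cpumExampleDumpGuestState(PVMCPU pVCpu)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); /* the guest register context */
    LogRel(("Guest rip=%016RX64 cr0=%RX64\n", pCtx->rip, pCtx->cr0));
}
#endif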

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/vmm/cpumctx-v1_6.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/hwaccm.h>
#include <VBox/vmm/ssm.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>

#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
#include <iprt/mp.h>
#include <iprt/cpuset.h>
#include "internal/pgm.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if 0 /* later when actual changes have been made */
/** The current saved state version. */
#define CPUM_SAVED_STATE_VERSION                14
#else
# define CPUM_SAVED_STATE_VERSION               CPUM_SAVED_STATE_VERSION_MEM
#endif
/** The current saved state version before using SSMR3PutStruct. */
#define CPUM_SAVED_STATE_VERSION_MEM            13
/** The saved state version before introducing the MSR size field. */
#define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE    12
/** The saved state version of 3.2, 3.1 and 3.3 trunk before the hidden
 * selector register change (CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID). */
#define CPUM_SAVED_STATE_VERSION_VER3_2         11
/** The saved state version of 3.0 and 3.1 trunk before the teleportation
 * changes. */
#define CPUM_SAVED_STATE_VERSION_VER3_0         10
/** The saved state version for the 2.1 trunk before the MSR changes. */
#define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR   9
/** The saved state version of 2.0, used for backwards compatibility. */
#define CPUM_SAVED_STATE_VERSION_VER2_0         8
/** The saved state version of 1.6, used for backwards compatibility. */
#define CPUM_SAVED_STATE_VERSION_VER1_6         6
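
/* Illustrative sketch (hypothetical, not part of the original file): a load
   routine such as cpumR3LoadExec() typically fences off the saved state
   versions it understands before unpacking anything, along these lines: */
#if 0
    if (    uVersion != CPUM_SAVED_STATE_VERSION
        &&  uVersion != CPUM_SAVED_STATE_VERSION_VER3_2
        &&  uVersion != CPUM_SAVED_STATE_VERSION_VER3_0
        &&  uVersion != CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR
        &&  uVersion != CPUM_SAVED_STATE_VERSION_VER2_0
        &&  uVersion != CPUM_SAVED_STATE_VERSION_VER1_6)
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
#endif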

#define CPUM_WITH_CHANGED_CPUMCTX

/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/

/**
 * What kind of cpu info dump to perform.
 */
typedef enum CPUMDUMPTYPE
{
    CPUMDUMPTYPE_TERSE,
    CPUMDUMPTYPE_DEFAULT,
    CPUMDUMPTYPE_VERBOSE
} CPUMDUMPTYPE;
/** Pointer to a cpu info dump type. */
typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
static int cpumR3CpuIdInit(PVM pVM);
static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Saved state field descriptors for CPUMCTX. */
static const SSMFIELD g_aCpumCtxFields[] =
{
    SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
    SSMFIELD_ENTRY_IGNORE( CPUMCTX, fpu.au32RsrvdRest),
    SSMFIELD_ENTRY( CPUMCTX, rdi),
    SSMFIELD_ENTRY( CPUMCTX, rsi),
    SSMFIELD_ENTRY( CPUMCTX, rbp),
    SSMFIELD_ENTRY( CPUMCTX, rax),
    SSMFIELD_ENTRY( CPUMCTX, rbx),
    SSMFIELD_ENTRY( CPUMCTX, rdx),
    SSMFIELD_ENTRY( CPUMCTX, rcx),
    SSMFIELD_ENTRY( CPUMCTX, rsp),
    SSMFIELD_ENTRY_OLD( lss_esp, sizeof(uint32_t)),
    SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
    SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
    SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
    SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, es.Sel),
    SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
    SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
    SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
    SSMFIELD_ENTRY( CPUMCTX, rflags),
    SSMFIELD_ENTRY( CPUMCTX, rip),
    SSMFIELD_ENTRY( CPUMCTX, r8),
    SSMFIELD_ENTRY( CPUMCTX, r9),
    SSMFIELD_ENTRY( CPUMCTX, r10),
    SSMFIELD_ENTRY( CPUMCTX, r11),
    SSMFIELD_ENTRY( CPUMCTX, r12),
    SSMFIELD_ENTRY( CPUMCTX, r13),
    SSMFIELD_ENTRY( CPUMCTX, r14),
    SSMFIELD_ENTRY( CPUMCTX, r15),
    SSMFIELD_ENTRY( CPUMCTX, es.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, es.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ss.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ds.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
    SSMFIELD_ENTRY( CPUMCTX, fs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, gs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cr0),
    SSMFIELD_ENTRY( CPUMCTX, cr2),
    SSMFIELD_ENTRY( CPUMCTX, cr3),
    SSMFIELD_ENTRY( CPUMCTX, cr4),
    SSMFIELD_ENTRY( CPUMCTX, dr[0]),
    SSMFIELD_ENTRY( CPUMCTX, dr[1]),
    SSMFIELD_ENTRY( CPUMCTX, dr[2]),
    SSMFIELD_ENTRY( CPUMCTX, dr[3]),
    SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, dr[6]),
    SSMFIELD_ENTRY( CPUMCTX, dr[7]),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.pGdt),
    SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
    SSMFIELD_ENTRY( CPUMCTX, idtr.pIdt),
    SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
    SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
    SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
    SSMFIELD_ENTRY( CPUMCTX, msrEFER),
    SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrPAT),
    SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
    SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
    SSMFIELD_ENTRY( CPUMCTX, tr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
    SSMFIELD_ENTRY_TERM()
};
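
/* Illustrative sketch (hypothetical, not part of the original file): a field
   descriptor table like g_aCpumCtxFields above is meant for the SSM struct
   APIs, which walk the entries when writing/reading a saved state; the *Ex
   names and exact signatures are assumptions here: */
#if 0
    rc = SSMR3PutStructEx(pSSM, pCtx, sizeof(*pCtx), 0 /*fFlags*/, &g_aCpumCtxFields[0], NULL /*pvUser*/);
    /* ... and later, on the load side: */
    rc = SSMR3GetStructEx(pSSM, pCtx, sizeof(*pCtx), 0 /*fFlags*/, &g_aCpumCtxFields[0], NULL /*pvUser*/);
#endif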

/** Saved state field descriptors for CPUMCTX_VER1_6. */
static const SSMFIELD g_aCpumCtxFieldsV16[] =
{
    SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
    SSMFIELD_ENTRY_IGNORE( CPUMCTX, fpu.au32RsrvdRest),
    SSMFIELD_ENTRY( CPUMCTX, rdi),
    SSMFIELD_ENTRY( CPUMCTX, rsi),
    SSMFIELD_ENTRY( CPUMCTX, rbp),
    SSMFIELD_ENTRY( CPUMCTX, rax),
    SSMFIELD_ENTRY( CPUMCTX, rbx),
    SSMFIELD_ENTRY( CPUMCTX, rdx),
    SSMFIELD_ENTRY( CPUMCTX, rcx),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, rsp),
    SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
    SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( CPUMCTX, sizeof(uint64_t) /*rsp_notused*/),
    SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
    SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
    SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, es.Sel),
    SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
    SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
    SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
    SSMFIELD_ENTRY( CPUMCTX, rflags),
    SSMFIELD_ENTRY( CPUMCTX, rip),
    SSMFIELD_ENTRY( CPUMCTX, r8),
    SSMFIELD_ENTRY( CPUMCTX, r9),
    SSMFIELD_ENTRY( CPUMCTX, r10),
    SSMFIELD_ENTRY( CPUMCTX, r11),
    SSMFIELD_ENTRY( CPUMCTX, r12),
    SSMFIELD_ENTRY( CPUMCTX, r13),
    SSMFIELD_ENTRY( CPUMCTX, r14),
    SSMFIELD_ENTRY( CPUMCTX, r15),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, es.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, es.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, cs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ss.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ds.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, fs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cr0),
    SSMFIELD_ENTRY( CPUMCTX, cr2),
    SSMFIELD_ENTRY( CPUMCTX, cr3),
    SSMFIELD_ENTRY( CPUMCTX, cr4),
    SSMFIELD_ENTRY_OLD( cr8, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, dr[0]),
    SSMFIELD_ENTRY( CPUMCTX, dr[1]),
    SSMFIELD_ENTRY( CPUMCTX, dr[2]),
    SSMFIELD_ENTRY( CPUMCTX, dr[3]),
    SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, dr[6]),
    SSMFIELD_ENTRY( CPUMCTX, dr[7]),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gdtr.pGdt),
    SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( gdtrPadding64, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, idtr.pIdt),
    SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( idtrPadding64, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
    SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
    SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
    SSMFIELD_ENTRY( CPUMCTX, msrEFER),
    SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrPAT),
    SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
    SSMFIELD_ENTRY_OLD( msrFSBASE, sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( msrGSBASE, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ldtr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, tr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
    SSMFIELD_ENTRY_OLD( padding, sizeof(uint32_t)*2),
    SSMFIELD_ENTRY_TERM()
};
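
/* Note on the table above: the SSMFIELD_ENTRY_U32_ZX_U64() entries mark
   members the 1.6 format stored as 32-bit values which are zero-extended to
   64 bits when loaded into the current CPUMCTX, while SSMFIELD_ENTRY_OLD()
   entries merely skip saved fields that no longer exist in the structure. */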


/**
 * Initializes the CPUM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3DECL(int) CPUMR3Init(PVM pVM)
{
    LogFlow(("CPUMR3Init\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, cpum.s, 32);
    AssertCompile(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
    AssertCompileSizeAlignment(CPUMCTX, 64);
    AssertCompileSizeAlignment(CPUMCTXMSRS, 64);
    AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
    AssertCompileMemberAlignment(VM, cpum, 64);
    AssertCompileMemberAlignment(VM, aCpus, 64);
    AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
    AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64);

    /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */
    pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum);
    Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.offCPUMCPU0 == (uintptr_t)&pVM->aCpus[0].cpum);

    /* Calculate the offset from CPUMCPU to CPUM. */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->cpum.s.offCPUM = RT_OFFSETOF(VM, aCpus[i].cpum) - RT_OFFSETOF(VM, cpum);
        Assert((uintptr_t)&pVCpu->cpum - pVCpu->cpum.s.offCPUM == (uintptr_t)&pVM->cpum);
    }

    /*
     * Check that the CPU supports the minimum features we require.
     */
    if (!ASMHasCpuId())
    {
        Log(("The CPU doesn't support CPUID!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    ASMCpuId_ECX_EDX(1, &pVM->cpum.s.CPUFeatures.ecx, &pVM->cpum.s.CPUFeatures.edx);
    ASMCpuId_ECX_EDX(0x80000001, &pVM->cpum.s.CPUFeaturesExt.ecx, &pVM->cpum.s.CPUFeaturesExt.edx);

    /* Setup the CR4 AND and OR masks used in the switcher. */
    /* Depends on the presence of FXSAVE(SSE) support on the host CPU. */
    if (!pVM->cpum.s.CPUFeatures.edx.u1FXSR)
    {
        Log(("The CPU doesn't support FXSAVE/FXRSTOR!\n"));
        /* No FXSAVE implies no SSE */
        pVM->cpum.s.CR4.AndMask = X86_CR4_PVI | X86_CR4_VME;
        pVM->cpum.s.CR4.OrMask  = 0;
    }
    else
    {
        pVM->cpum.s.CR4.AndMask = X86_CR4_OSXMMEEXCPT | X86_CR4_PVI | X86_CR4_VME;
        pVM->cpum.s.CR4.OrMask  = X86_CR4_OSFSXR;
    }

    if (!pVM->cpum.s.CPUFeatures.edx.u1MMX)
    {
        Log(("The CPU doesn't support MMX!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    if (!pVM->cpum.s.CPUFeatures.edx.u1TSC)
    {
        Log(("The CPU doesn't support TSC!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    /* Bogus on AMD? */
    if (!pVM->cpum.s.CPUFeatures.edx.u1SEP)
        Log(("The CPU doesn't support SYSENTER/SYSEXIT!\n"));

    /*
     * Detect the host CPU vendor.
     * (The guest CPU vendor is re-detected later on.)
     */
    uint32_t uEAX, uEBX, uECX, uEDX;
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    pVM->cpum.s.enmHostCpuVendor  = cpumR3DetectVendor(uEAX, uEBX, uECX, uEDX);
    pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor;

    /*
     * Setup hypervisor startup values.
     */

    /*
     * Register saved state data item.
     */
    int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
                                   NULL, cpumR3LiveExec, NULL,
                                   NULL, cpumR3SaveExec, NULL,
                                   cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Register info handlers and registers with the debugger facility.
     */
    DBGFR3InfoRegisterInternal(pVM, "cpum", "Displays all the cpu states.", &cpumR3InfoAll);
    DBGFR3InfoRegisterInternal(pVM, "cpumguest", "Displays the guest cpu state.", &cpumR3InfoGuest);
    DBGFR3InfoRegisterInternal(pVM, "cpumhyper", "Displays the hypervisor cpu state.", &cpumR3InfoHyper);
    DBGFR3InfoRegisterInternal(pVM, "cpumhost", "Displays the host cpu state.", &cpumR3InfoHost);
    DBGFR3InfoRegisterInternal(pVM, "cpuid", "Displays the guest cpuid leaves.", &cpumR3CpuIdInfo);
    DBGFR3InfoRegisterInternal(pVM, "cpumguestinstr", "Displays the current guest instruction.", &cpumR3InfoGuestInstr);

    rc = cpumR3DbgInit(pVM);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the Guest CPUID state.
     */
    rc = cpumR3CpuIdInit(pVM);
    if (RT_FAILURE(rc))
        return rc;
    CPUMR3Reset(pVM);
    return VINF_SUCCESS;
}


/**
 * Detects the CPU vendor given the CPUID(0) results.
 *
 * @returns The vendor.
 * @param   uEAX    EAX from CPUID(0).
 * @param   uEBX    EBX from CPUID(0).
 * @param   uECX    ECX from CPUID(0).
 * @param   uEDX    EDX from CPUID(0).
 */
static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
    if (    uEAX >= 1
        &&  uEBX == X86_CPUID_VENDOR_AMD_EBX
        &&  uECX == X86_CPUID_VENDOR_AMD_ECX
        &&  uEDX == X86_CPUID_VENDOR_AMD_EDX)
        return CPUMCPUVENDOR_AMD;

    if (    uEAX >= 1
        &&  uEBX == X86_CPUID_VENDOR_INTEL_EBX
        &&  uECX == X86_CPUID_VENDOR_INTEL_ECX
        &&  uEDX == X86_CPUID_VENDOR_INTEL_EDX)
        return CPUMCPUVENDOR_INTEL;

    /** @todo detect the other buggers... */
    return CPUMCPUVENDOR_UNKNOWN;
}
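
/* For reference: CPUID(0) returns the vendor string in EBX:EDX:ECX order,
   so "GenuineIntel" arrives as EBX='Genu', EDX='ineI', ECX='ntel'.  A
   hypothetical reconstruction of the raw string (sketch only): */
#if 0
    char szVendor[13];
    *(uint32_t *)&szVendor[0] = uEBX;
    *(uint32_t *)&szVendor[4] = uEDX;
    *(uint32_t *)&szVendor[8] = uECX;
    szVendor[12] = '\0';    /* "GenuineIntel", "AuthenticAMD", ... */
#endif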


/**
 * Fetches overrides for a CPUID leaf.
 *
 * @returns VBox status code.
 * @param   pLeaf       The leaf to load the overrides into.
 * @param   pCfgNode    The CFGM node containing the overrides
 *                      (/CPUM/HostCPUID/ or /CPUM/CPUID/).
 * @param   iLeaf       The CPUID leaf number.
 */
static int cpumR3CpuIdFetchLeafOverride(PCPUMCPUID pLeaf, PCFGMNODE pCfgNode, uint32_t iLeaf)
{
    PCFGMNODE pLeafNode = CFGMR3GetChildF(pCfgNode, "%RX32", iLeaf);
    if (pLeafNode)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->eax = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->ebx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->ecx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "edx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->edx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

    }
    return VINF_SUCCESS;
}
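
/* Illustrative usage (hypothetical values; the extradata spelling is an
   assumption): with the CFGM tree reachable through the usual
   "VBoxInternal/" extradata prefix, an override for EDX of leaf 0x80000001
   would live at /CPUM/CPUID/80000001/edx, e.g.:

       VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/CPUID/80000001/edx" 0x00000000

   cpumR3CpuIdFetchLeafOverride() above picks up such values per register and
   leaves the remaining registers at their host-derived defaults. */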


/**
 * Load the overrides for a set of CPUID leaves.
 *
 * @returns VBox status code.
 * @param   paLeaves    The leaf array.
 * @param   cLeaves     The number of leaves.
 * @param   uStart      The start leaf number.
 * @param   pCfgNode    The CFGM node containing the overrides
 *                      (/CPUM/HostCPUID/ or /CPUM/CPUID/).
 */
static int cpumR3CpuIdInitLoadOverrideSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
{
    for (uint32_t i = 0; i < cLeaves; i++)
    {
        int rc = cpumR3CpuIdFetchLeafOverride(&paLeaves[i], pCfgNode, uStart + i);
        if (RT_FAILURE(rc))
            return rc;
    }

    return VINF_SUCCESS;
}

/**
 * Init a set of host CPUID leaves.
 *
 * @returns VBox status code.
 * @param   paLeaves    The leaf array.
 * @param   cLeaves     The number of leaves.
 * @param   uStart      The start leaf number.
 * @param   pCfgNode    The /CPUM/HostCPUID/ node.
 */
static int cpumR3CpuIdInitHostSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
{
    /* Using the ECX variant for all of them can't hurt... */
    for (uint32_t i = 0; i < cLeaves; i++)
        ASMCpuId_Idx_ECX(uStart + i, 0, &paLeaves[i].eax, &paLeaves[i].ebx, &paLeaves[i].ecx, &paLeaves[i].edx);

    /* Load CPUID leaf override; we currently don't care if the user
       specifies features the host CPU doesn't support. */
    return cpumR3CpuIdInitLoadOverrideSet(uStart, paLeaves, cLeaves, pCfgNode);
}


/**
 * Initializes the emulated CPU's cpuid information.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
static int cpumR3CpuIdInit(PVM pVM)
{
    PCPUM       pCPUM    = &pVM->cpum.s;
    PCFGMNODE   pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
    uint32_t    i;
    int         rc;

#define PORTABLE_CLEAR_BITS_WHEN(Lvl, LeafSuffReg, FeatNm, fMask, uValue) \
    if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fMask)) == (uValue) ) \
    { \
        LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: %#x -> 0\n", pCPUM->aGuestCpuId##LeafSuffReg & (fMask))); \
        pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fMask); \
    }
#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, LeafSuffReg, FeatNm, fBitMask) \
    if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fBitMask)) ) \
    { \
        LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: 1 -> 0\n")); \
        pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fBitMask); \
    }
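
/* For clarity, the invocation
   PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3)
   used below expands to roughly: */
#if 0
    if (pCPUM->u8PortableCpuIdLevel >= 1 && (pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_SSSE3))
    {
        LogRel(("PortableCpuId: Std[1].ecx[SSSE3]: 1 -> 0\n"));
        pCPUM->aGuestCpuIdStd[1].ecx &= ~(uint32_t)X86_CPUID_FEATURE_ECX_SSSE3;
    }
#endif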

    /*
     * Read the configuration.
     */
    /** @cfgm{CPUM/SyntheticCpu, boolean, false}
     * Enables the Synthetic CPU. The Vendor ID and Processor Name are
     * completely overridden by VirtualBox custom strings. Some
     * CPUID information is withheld, like the cache info. */
    rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &pCPUM->fSyntheticCpu, false);
    AssertRCReturn(rc, rc);

    /** @cfgm{CPUM/PortableCpuIdLevel, 8-bit, 0, 3, 0}
     * When non-zero CPUID features that could cause portability issues will be
     * stripped. The higher the value the more features gets stripped. Higher
     * values should only be used when older CPUs are involved since it may
     * harm performance and maybe also cause problems with specific guests. */
    rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, 0);
    AssertRCReturn(rc, rc);

    AssertLogRelReturn(!pCPUM->fSyntheticCpu || !pCPUM->u8PortableCpuIdLevel, VERR_CPUM_INCOMPATIBLE_CONFIG);

    /*
     * Get the host CPUID leaves and redetect the guest CPU vendor (could've
     * been overridden).
     */
    /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
     * Overrides the host CPUID leaf values used for calculating the guest CPUID
     * leaves. This can be used to preserve the CPUID values when moving a VM
     * to a different machine. Another use is restricting (or extending) the
     * feature set exposed to the guest. */
    PCFGMNODE pHostOverrideCfg = CFGMR3GetChild(pCpumCfg, "HostCPUID");
    rc = cpumR3CpuIdInitHostSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0], RT_ELEMENTS(pCPUM->aGuestCpuIdStd), pHostOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitHostSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0], RT_ELEMENTS(pCPUM->aGuestCpuIdExt), pHostOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitHostSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pHostOverrideCfg);
    AssertRCReturn(rc, rc);

    pCPUM->enmGuestCpuVendor = cpumR3DetectVendor(pCPUM->aGuestCpuIdStd[0].eax, pCPUM->aGuestCpuIdStd[0].ebx,
                                                  pCPUM->aGuestCpuIdStd[0].ecx, pCPUM->aGuestCpuIdStd[0].edx);
    /*
     * Determine the default leaf.
     *
     * Intel returns values of the highest standard function, while AMD
     * returns zeros. VIA on the other hand seems to return nothing or
     * perhaps some random garbage; we don't try to duplicate this behavior.
     */
    ASMCpuId(pCPUM->aGuestCpuIdStd[0].eax + 10, /** @todo r=bird: Use the host value here in case of overrides and more than 10 leaves being stripped already. */
             &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx,
             &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx);


    /* Cpuid 1 & 0x80000001:
     * Only report features we can support.
     *
     * Note! When enabling new features the Synthetic CPU and Portable CPUID
     *       options may require adjusting (i.e. stripping what was enabled).
     */
    pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU
                                  | X86_CPUID_FEATURE_EDX_VME
                                  | X86_CPUID_FEATURE_EDX_DE
                                  | X86_CPUID_FEATURE_EDX_PSE
                                  | X86_CPUID_FEATURE_EDX_TSC
                                  | X86_CPUID_FEATURE_EDX_MSR
                                  //| X86_CPUID_FEATURE_EDX_PAE - set later if configured.
                                  | X86_CPUID_FEATURE_EDX_MCE
                                  | X86_CPUID_FEATURE_EDX_CX8
                                  //| X86_CPUID_FEATURE_EDX_APIC - set by the APIC device if present.
                                  /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see @bugref{1757}) */
                                  //| X86_CPUID_FEATURE_EDX_SEP
                                  | X86_CPUID_FEATURE_EDX_MTRR
                                  | X86_CPUID_FEATURE_EDX_PGE
                                  | X86_CPUID_FEATURE_EDX_MCA
                                  | X86_CPUID_FEATURE_EDX_CMOV
                                  | X86_CPUID_FEATURE_EDX_PAT
                                  | X86_CPUID_FEATURE_EDX_PSE36
                                  //| X86_CPUID_FEATURE_EDX_PSN - no serial number.
                                  | X86_CPUID_FEATURE_EDX_CLFSH
                                  //| X86_CPUID_FEATURE_EDX_DS - no debug store.
                                  //| X86_CPUID_FEATURE_EDX_ACPI - not virtualized yet.
                                  | X86_CPUID_FEATURE_EDX_MMX
                                  | X86_CPUID_FEATURE_EDX_FXSR
                                  | X86_CPUID_FEATURE_EDX_SSE
                                  | X86_CPUID_FEATURE_EDX_SSE2
                                  //| X86_CPUID_FEATURE_EDX_SS - no self snoop.
                                  //| X86_CPUID_FEATURE_EDX_HTT - no hyperthreading.
                                  //| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
                                  //| X86_CPUID_FEATURE_EDX_PBE - no pending break enabled.
                                  | 0;
    pCPUM->aGuestCpuIdStd[1].ecx &= 0
                                  | X86_CPUID_FEATURE_ECX_SSE3
                                  /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
                                  | ((pVM->cCpus == 1) ? X86_CPUID_FEATURE_ECX_MONITOR : 0)
                                  //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
                                  //| X86_CPUID_FEATURE_ECX_VMX - not virtualized.
                                  //| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
                                  //| X86_CPUID_FEATURE_ECX_TM2 - no thermal monitor 2.
                                  | X86_CPUID_FEATURE_ECX_SSSE3
                                  //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
                                  //| X86_CPUID_FEATURE_ECX_CX16 - no cmpxchg16b
                                  /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
                                  //| X86_CPUID_FEATURE_ECX_TPRUPDATE
                                  /* ECX Bit 21 - x2APIC support - not yet. */
                                  //| X86_CPUID_FEATURE_ECX_X2APIC
                                  /* ECX Bit 23 - POPCNT instruction. */
                                  //| X86_CPUID_FEATURE_ECX_POPCNT
                                  | 0;
    if (pCPUM->u8PortableCpuIdLevel > 0)
    {
        PORTABLE_CLEAR_BITS_WHEN(1, Std[1].eax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
        PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
        PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSE3,  X86_CPUID_FEATURE_ECX_SSE3);
        PORTABLE_DISABLE_FEATURE_BIT(2, Std[1].edx, SSE2,  X86_CPUID_FEATURE_EDX_SSE2);
        PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, SSE,   X86_CPUID_FEATURE_EDX_SSE);
        PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
        PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CMOV,  X86_CPUID_FEATURE_EDX_CMOV);

        Assert(!(pCPUM->aGuestCpuIdStd[1].edx & ( X86_CPUID_FEATURE_EDX_SEP
                                                | X86_CPUID_FEATURE_EDX_PSN
                                                | X86_CPUID_FEATURE_EDX_DS
                                                | X86_CPUID_FEATURE_EDX_ACPI
                                                | X86_CPUID_FEATURE_EDX_SS
                                                | X86_CPUID_FEATURE_EDX_TM
                                                | X86_CPUID_FEATURE_EDX_PBE
                                                )));
        Assert(!(pCPUM->aGuestCpuIdStd[1].ecx & ( X86_CPUID_FEATURE_ECX_PCLMUL
                                                | X86_CPUID_FEATURE_ECX_DTES64
                                                | X86_CPUID_FEATURE_ECX_CPLDS
                                                | X86_CPUID_FEATURE_ECX_VMX
                                                | X86_CPUID_FEATURE_ECX_SMX
                                                | X86_CPUID_FEATURE_ECX_EST
                                                | X86_CPUID_FEATURE_ECX_TM2
                                                | X86_CPUID_FEATURE_ECX_CNTXID
                                                | X86_CPUID_FEATURE_ECX_FMA
                                                | X86_CPUID_FEATURE_ECX_CX16
                                                | X86_CPUID_FEATURE_ECX_TPRUPDATE
                                                | X86_CPUID_FEATURE_ECX_PDCM
                                                | X86_CPUID_FEATURE_ECX_DCA
                                                | X86_CPUID_FEATURE_ECX_MOVBE
                                                | X86_CPUID_FEATURE_ECX_AES
                                                | X86_CPUID_FEATURE_ECX_POPCNT
                                                | X86_CPUID_FEATURE_ECX_XSAVE
                                                | X86_CPUID_FEATURE_ECX_OSXSAVE
                                                | X86_CPUID_FEATURE_ECX_AVX
                                                )));
    }

    /* Cpuid 0x80000001:
     * Only report features we can support.
     *
     * Note! When enabling new features the Synthetic CPU and Portable CPUID
     *       options may require adjusting (i.e. stripping what was enabled).
     *
     * ASSUMES that this is ALWAYS the AMD defined feature set if present.
     */
    pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU
                                  | X86_CPUID_AMD_FEATURE_EDX_VME
                                  | X86_CPUID_AMD_FEATURE_EDX_DE
                                  | X86_CPUID_AMD_FEATURE_EDX_PSE
                                  | X86_CPUID_AMD_FEATURE_EDX_TSC
                                  | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
                                  //| X86_CPUID_AMD_FEATURE_EDX_PAE - not implemented yet.
                                  //| X86_CPUID_AMD_FEATURE_EDX_MCE - not virtualized yet.
                                  | X86_CPUID_AMD_FEATURE_EDX_CX8
                                  //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
                                  /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see @bugref{1757}) */
                                  //| X86_CPUID_AMD_FEATURE_EDX_SEP
                                  | X86_CPUID_AMD_FEATURE_EDX_MTRR
                                  | X86_CPUID_AMD_FEATURE_EDX_PGE
                                  | X86_CPUID_AMD_FEATURE_EDX_MCA
                                  | X86_CPUID_AMD_FEATURE_EDX_CMOV
                                  | X86_CPUID_AMD_FEATURE_EDX_PAT
                                  | X86_CPUID_AMD_FEATURE_EDX_PSE36
                                  //| X86_CPUID_AMD_FEATURE_EDX_NX - not virtualized, requires PAE.
                                  //| X86_CPUID_AMD_FEATURE_EDX_AXMMX
                                  | X86_CPUID_AMD_FEATURE_EDX_MMX
                                  | X86_CPUID_AMD_FEATURE_EDX_FXSR
                                  | X86_CPUID_AMD_FEATURE_EDX_FFXSR
                                  //| X86_CPUID_AMD_FEATURE_EDX_PAGE1GB
                                  //| X86_CPUID_AMD_FEATURE_EDX_RDTSCP - AMD only; turned on when necessary
                                  //| X86_CPUID_AMD_FEATURE_EDX_LONG_MODE - turned on when necessary
                                  | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
                                  | X86_CPUID_AMD_FEATURE_EDX_3DNOW
                                  | 0;
    pCPUM->aGuestCpuIdExt[1].ecx &= 0
                                  //| X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF
                                  //| X86_CPUID_AMD_FEATURE_ECX_CMPL
                                  //| X86_CPUID_AMD_FEATURE_ECX_SVM - not virtualized.
                                  //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                                  /* Note: This could prevent teleporting from AMD to Intel CPUs! */
                                  | X86_CPUID_AMD_FEATURE_ECX_CR8L /* expose lock mov cr0 = mov cr8 hack for guests that can use this feature to access the TPR. */
                                  //| X86_CPUID_AMD_FEATURE_ECX_ABM
                                  //| X86_CPUID_AMD_FEATURE_ECX_SSE4A
                                  //| X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
                                  //| X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
                                  //| X86_CPUID_AMD_FEATURE_ECX_OSVW
                                  //| X86_CPUID_AMD_FEATURE_ECX_IBS
                                  //| X86_CPUID_AMD_FEATURE_ECX_SSE5
                                  //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
                                  //| X86_CPUID_AMD_FEATURE_ECX_WDT
                                  | 0;
    if (pCPUM->u8PortableCpuIdLevel > 0)
    {
        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].ecx, CR8L,      X86_CPUID_AMD_FEATURE_ECX_CR8L);
        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW,     X86_CPUID_AMD_FEATURE_EDX_3DNOW);
        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW_EX,  X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, FFXSR,     X86_CPUID_AMD_FEATURE_EDX_FFXSR);
        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP,    X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
        PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF, X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF);
        PORTABLE_DISABLE_FEATURE_BIT(3, Ext[1].ecx, CMOV,      X86_CPUID_AMD_FEATURE_EDX_CMOV);

        Assert(!(pCPUM->aGuestCpuIdExt[1].ecx & ( X86_CPUID_AMD_FEATURE_ECX_CMPL
                                                | X86_CPUID_AMD_FEATURE_ECX_SVM
                                                | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                                                | X86_CPUID_AMD_FEATURE_ECX_CR8L
                                                | X86_CPUID_AMD_FEATURE_ECX_ABM
                                                | X86_CPUID_AMD_FEATURE_ECX_SSE4A
                                                | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
                                                | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
                                                | X86_CPUID_AMD_FEATURE_ECX_OSVW
                                                | X86_CPUID_AMD_FEATURE_ECX_IBS
                                                | X86_CPUID_AMD_FEATURE_ECX_SSE5
                                                | X86_CPUID_AMD_FEATURE_ECX_SKINIT
                                                | X86_CPUID_AMD_FEATURE_ECX_WDT
                                                | UINT32_C(0xffffc000)
                                                )));
        Assert(!(pCPUM->aGuestCpuIdExt[1].edx & ( RT_BIT(10)
                                                | X86_CPUID_AMD_FEATURE_EDX_SEP
                                                | RT_BIT(18)
                                                | RT_BIT(19)
                                                | RT_BIT(21)
                                                | X86_CPUID_AMD_FEATURE_EDX_AXMMX
                                                | X86_CPUID_AMD_FEATURE_EDX_PAGE1GB
                                                | RT_BIT(28)
                                                )));
    }

    /*
     * Apply the Synthetic CPU modifications. (TODO: move this up)
     */
    if (pCPUM->fSyntheticCpu)
    {
        static const char s_szVendor[13]    = "VirtualBox ";
        static const char s_szProcessor[48] = "VirtualBox SPARCx86 Processor v1000 "; /* includes null terminator */

        pCPUM->enmGuestCpuVendor = CPUMCPUVENDOR_SYNTHETIC;

        /* Limit the number of standard leaves; 5 for monitor/mwait. */
        pCPUM->aGuestCpuIdStd[0].eax = RT_MIN(pCPUM->aGuestCpuIdStd[0].eax, 5);

        /* 0: Vendor */
        pCPUM->aGuestCpuIdStd[0].ebx = pCPUM->aGuestCpuIdExt[0].ebx = ((uint32_t *)s_szVendor)[0];
        pCPUM->aGuestCpuIdStd[0].ecx = pCPUM->aGuestCpuIdExt[0].ecx = ((uint32_t *)s_szVendor)[2];
        pCPUM->aGuestCpuIdStd[0].edx = pCPUM->aGuestCpuIdExt[0].edx = ((uint32_t *)s_szVendor)[1];

        /* 1.eax: Version information. family : model : stepping */
        pCPUM->aGuestCpuIdStd[1].eax = (0xf << 8) + (0x1 << 4) + 1;

        /* Leaves 2 - 4 are Intel only - zero them out. */
        memset(&pCPUM->aGuestCpuIdStd[2], 0, sizeof(pCPUM->aGuestCpuIdStd[2]));
        memset(&pCPUM->aGuestCpuIdStd[3], 0, sizeof(pCPUM->aGuestCpuIdStd[3]));
        memset(&pCPUM->aGuestCpuIdStd[4], 0, sizeof(pCPUM->aGuestCpuIdStd[4]));

        /* Leaf 5 = monitor/mwait */

        /* Limit the number of extended leaves: 0x80000008 to include the max virtual and physical address size (64 bits guests). */
        pCPUM->aGuestCpuIdExt[0].eax = RT_MIN(pCPUM->aGuestCpuIdExt[0].eax, 0x80000008);
        /* AMD only - set to zero. */
        pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0;

        /* 0x80000001: AMD only; shared feature bits are set dynamically. */
        memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1]));

        /* 0x80000002-4: Processor Name String Identifier. */
        pCPUM->aGuestCpuIdExt[2].eax = ((uint32_t *)s_szProcessor)[0];
        pCPUM->aGuestCpuIdExt[2].ebx = ((uint32_t *)s_szProcessor)[1];
        pCPUM->aGuestCpuIdExt[2].ecx = ((uint32_t *)s_szProcessor)[2];
        pCPUM->aGuestCpuIdExt[2].edx = ((uint32_t *)s_szProcessor)[3];
        pCPUM->aGuestCpuIdExt[3].eax = ((uint32_t *)s_szProcessor)[4];
        pCPUM->aGuestCpuIdExt[3].ebx = ((uint32_t *)s_szProcessor)[5];
        pCPUM->aGuestCpuIdExt[3].ecx = ((uint32_t *)s_szProcessor)[6];
        pCPUM->aGuestCpuIdExt[3].edx = ((uint32_t *)s_szProcessor)[7];
        pCPUM->aGuestCpuIdExt[4].eax = ((uint32_t *)s_szProcessor)[8];
        pCPUM->aGuestCpuIdExt[4].ebx = ((uint32_t *)s_szProcessor)[9];
        pCPUM->aGuestCpuIdExt[4].ecx = ((uint32_t *)s_szProcessor)[10];
        pCPUM->aGuestCpuIdExt[4].edx = ((uint32_t *)s_szProcessor)[11];

        /* 0x80000005-7 - reserved -> zero */
        memset(&pCPUM->aGuestCpuIdExt[5], 0, sizeof(pCPUM->aGuestCpuIdExt[5]));
        memset(&pCPUM->aGuestCpuIdExt[6], 0, sizeof(pCPUM->aGuestCpuIdExt[6]));
        memset(&pCPUM->aGuestCpuIdExt[7], 0, sizeof(pCPUM->aGuestCpuIdExt[7]));

        /* 0x80000008: only the max virtual and physical address size. */
        pCPUM->aGuestCpuIdExt[8].ecx = pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */
    }
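
    /* Note on the string packing above: on little-endian x86 each uint32_t
       holds four consecutive characters, and CPUID(0) reports the vendor in
       EBX:EDX:ECX order - hence the [0]/[2]/[1] indexing. "VirtualBox " thus
       lands as EBX='Virt', EDX='ualB', ECX='ox  '. */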

    /*
     * Hide HTT, multicore, SMP, whatever.
     * (APIC-ID := 0 and #LogCpus := 0)
     */
    pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
#ifdef VBOX_WITH_MULTI_CORE
    if (    pCPUM->enmGuestCpuVendor != CPUMCPUVENDOR_SYNTHETIC
        &&  pVM->cCpus > 1)
    {
        /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor. */
        pCPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCpus << 16);
        pCPUM->aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_HTT; /* necessary for hyper-threading *or* multi-core CPUs */
    }
#endif

    /* Cpuid 2:
     * Intel: Cache and TLB information
     * AMD:   Reserved
     * Safe to expose; restrict the number of calls to 1 for the portable case.
     */
    if (    pCPUM->u8PortableCpuIdLevel > 0
        &&  pCPUM->aGuestCpuIdStd[0].eax >= 2
        &&  (pCPUM->aGuestCpuIdStd[2].eax & 0xff) > 1)
    {
        LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCPUM->aGuestCpuIdStd[2].eax & 0xff));
        pCPUM->aGuestCpuIdStd[2].eax &= UINT32_C(0xfffffffe);
    }

    /* Cpuid 3:
     * Intel: EAX, EBX - reserved (transmeta uses these)
     *        ECX, EDX - Processor Serial Number if available, otherwise reserved
     * AMD:   Reserved
     * Safe to expose
     */
    if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN))
    {
        pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0;
        if (pCPUM->u8PortableCpuIdLevel > 0)
            pCPUM->aGuestCpuIdStd[3].eax = pCPUM->aGuestCpuIdStd[3].ebx = 0;
    }

    /* Cpuid 4:
     * Intel: Deterministic Cache Parameters Leaf
     *        Note: Depends on the ECX input! -> Feeling rather lazy now, so we just return 0
     * AMD:   Reserved
     * Safe to expose, except for EAX:
     *        Bits 25-14: Maximum number of addressable IDs for logical processors sharing this cache (see note)**
     *        Bits 31-26: Maximum number of processor cores in this physical package**
     * Note: These SMP values are constant regardless of ECX
     */
    pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0;
    pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0;
#ifdef VBOX_WITH_MULTI_CORE
    if (    pVM->cCpus > 1
        &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
        /* One logical processor with possibly multiple cores. */
        /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
        pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCpus - 1) << 26); /* 6 bits only -> 64 cores! */
    }
#endif

    /* Cpuid 5:     Monitor/mwait Leaf
     * Intel: ECX, EDX - reserved
     *        EAX, EBX - Smallest and largest monitor line size
     * AMD:   EDX - reserved
     *        EAX, EBX - Smallest and largest monitor line size
     *        ECX - extensions (ignored for now)
     * Safe to expose
     */
    if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR))
        pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0;

    pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
    /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
     * Expose MWAIT extended features to the guest. For now we expose
     * just MWAIT break on interrupt feature (bit 1).
     */
    bool fMWaitExtensions;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false); AssertRCReturn(rc, rc);
    if (fMWaitExtensions)
    {
        pCPUM->aGuestCpuIdStd[5].ecx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
        /** @todo For now we just expose the host's MWAIT C-states, although conceptually
            it should be part of our power management virtualization model. */
#if 0
        /* MWAIT sub C-states */
        pCPUM->aGuestCpuIdStd[5].edx =
                (0 << 0)  /* 0 in C0 */ |
                (2 << 4)  /* 2 in C1 */ |
                (2 << 8)  /* 2 in C2 */ |
                (2 << 12) /* 2 in C3 */ |
                (0 << 16) /* 0 in C4 */
                ;
#endif
    }
    else
        pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;

    /* Cpuid 0x80000005 & 0x80000006 contain information about L1, L2 & L3 cache and TLB identifiers.
     * Safe to pass on to the guest.
     *
     * Intel: 0x80000005 reserved
     *        0x80000006 L2 cache information
     * AMD:   0x80000005 L1 cache information
     *        0x80000006 L2/L3 cache information
     */

    /* Cpuid 0x80000007:
     * AMD:   EAX, EBX, ECX - reserved
     *        EDX: Advanced Power Management Information
     * Intel: Reserved
     */
    if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007))
    {
        Assert(pVM->cpum.s.enmGuestCpuVendor != CPUMCPUVENDOR_INVALID);

        pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0;

        if (pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
        {
            /* Only expose the TSC invariant capability bit to the guest. */
            pCPUM->aGuestCpuIdExt[7].edx &= 0
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_TS
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_FID
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_VID
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_TM
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_STC
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_MC
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
#if 0   /* We don't expose X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR, because newer
         * Linux kernels blindly assume that the AMD performance counters work
         * if this is set for 64 bits guests. (Can't really find a CPUID feature
         * bit for them though.) */
                                          | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
#endif
                                          | 0;
        }
        else
            pCPUM->aGuestCpuIdExt[7].edx = 0;
    }

    /* Cpuid 0x80000008:
     * AMD:   EBX, EDX - reserved
     *        EAX: Virtual/Physical/Guest address Size
     *        ECX: Number of cores + APICIdCoreIdSize
     * Intel: EAX: Virtual/Physical address Size
     *        EBX, ECX, EDX - reserved
     */
    if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
    {
        /* Only expose the virtual and physical address sizes to the guest. */
        pCPUM->aGuestCpuIdExt[8].eax &= UINT32_C(0x0000ffff);
        pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */
        /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu).
         * NC (0-7) Number of cores; 0 equals 1 core */
        pCPUM->aGuestCpuIdExt[8].ecx = 0;
#ifdef VBOX_WITH_MULTI_CORE
        if (    pVM->cCpus > 1
            &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
        {
            /* Legacy method to determine the number of cores. */
            pCPUM->aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
            pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
        }
#endif
    }

    /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
     * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
     * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
     * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
     */
    bool fNt4LeafLimit;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false); AssertRCReturn(rc, rc);
    if (fNt4LeafLimit)
        pCPUM->aGuestCpuIdStd[0].eax = 3; /** @todo r=bird: shouldn't we check if pCPUM->aGuestCpuIdStd[0].eax > 3 before setting it 3 here? */

    /*
     * Limit the number of entries and fill the remaining with the defaults.
     *
     * The limits are masking off stuff about power saving and similar, this
     * is perhaps a bit crudely done as there is probably some relatively harmless
     * info too in these leaves (like words about having a constant TSC).
     */
    if (pCPUM->aGuestCpuIdStd[0].eax > 5)
        pCPUM->aGuestCpuIdStd[0].eax = 5;
    for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++)
        pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef;

    if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008))
        pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008);
    for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000)
           ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1
           : 0;
         i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt);
         i++)
        pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef;

    /*
     * Centaur stuff (VIA).
     *
     * The important part here (we think) is to make sure the 0xc0000000
     * function returns 0xc0000001. As for the features, we don't currently
     * let on about any of those... 0xc0000002 seems to be some
     * temperature/hz/++ stuff, include it as well (static).
     */
    if (    pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000)
        &&  pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004))
    {
        pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002));
        pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */
        for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000);
             i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
             i++)
            pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
    }
    else
        for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
            pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;

    /*
     * Hypervisor identification.
     *
     * We only return minimal information, primarily ensuring that the
     * 0x40000000 function returns 0x40000001 and identifying ourselves.
     * Currently we do not support any hypervisor-specific interface.
     */
    pCPUM->aGuestCpuIdHyper[0].eax = UINT32_C(0x40000001);
    pCPUM->aGuestCpuIdHyper[0].ebx = pCPUM->aGuestCpuIdHyper[0].ecx
                                   = pCPUM->aGuestCpuIdHyper[0].edx = 0x786f4256; /* 'VBox' */
    pCPUM->aGuestCpuIdHyper[1].eax = 0x656e6f6e;                                  /* 'none' */
    pCPUM->aGuestCpuIdHyper[1].ebx = pCPUM->aGuestCpuIdHyper[1].ecx
                                   = pCPUM->aGuestCpuIdHyper[1].edx = 0;          /* Reserved */
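
    /* For reference: 0x786f4256 is 'V','B','o','x' in little-endian byte
       order, so the three registers dump as the vendor string "VBoxVBoxVBox";
       0x656e6f6e likewise spells "none", i.e. no hypervisor-specific
       interface is advertised. */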

    /*
     * Load CPUID overrides from configuration.
     * Note: Kind of redundant now, but allows unchanged overrides
     */
    /** @cfgm{CPUM/CPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
     * Overrides the CPUID leaf values. */
    PCFGMNODE pOverrideCfg = CFGMR3GetChild(pCpumCfg, "CPUID");
    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0], RT_ELEMENTS(pCPUM->aGuestCpuIdStd), pOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0], RT_ELEMENTS(pCPUM->aGuestCpuIdExt), pOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pOverrideCfg);
    AssertRCReturn(rc, rc);

    /*
     * Check if PAE was explicitly enabled by the user.
     */
    bool fEnable;
    rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false); AssertRCReturn(rc, rc);
    if (fEnable)
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);

    /*
     * We don't normally enable NX for raw-mode, so give the user a chance to
     * force it on.
     */
    rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false); AssertRCReturn(rc, rc);
    if (fEnable)
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);

    /*
     * We don't enable the Hypervisor Present bit by default, but it may
     * be needed by some guests.
     */
    rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableHVP", &fEnable, false); AssertRCReturn(rc, rc);
    if (fEnable)
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);

    /*
     * Log the cpuid and we're good.
     */
    bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
    RTCPUSET OnlineSet;
    LogRel(("Logical host processors: %u present, %u max, %u online, online mask: %016RX64\n",
            (unsigned)RTMpGetPresentCount(), (unsigned)RTMpGetCount(), (unsigned)RTMpGetOnlineCount(),
            RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
    LogRel(("************************* CPUID dump ************************\n"));
    DBGFR3Info(pVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
    LogRel(("\n"));
    DBGFR3InfoLog(pVM, "cpuid", "verbose"); /* macro */
    RTLogRelSetBuffering(fOldBuffered);
    LogRel(("******************** End of CPUID dump **********************\n"));

#undef PORTABLE_DISABLE_FEATURE_BIT
#undef PORTABLE_CLEAR_BITS_WHEN

    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * The CPUM will update the addresses used by the switcher.
 *
 * @param   pVM     The VM.
 */
VMMR3DECL(void) CPUMR3Relocate(PVM pVM)
{
    LogFlow(("CPUMR3Relocate\n"));
    /* nothing to do any more. */
}


/**
 * Apply late CPUM property changes based on the fHWVirtEx setting.
 *
 * @param   pVM                 Pointer to the VM.
 * @param   fHWVirtExEnabled    HWVirtEx enabled/disabled
 */
VMMR3DECL(void) CPUMR3SetHWVirtEx(PVM pVM, bool fHWVirtExEnabled)
{
    /*
     * Workaround for missing cpuid(0) patches when leaf 4 returns GuestCpuIdDef:
     * If we fail to patch a cpuid(0).eax then Linux tries to determine the number
     * of processors from (cpuid(4).eax >> 26) + 1.
     *
     * Note: this code is obsolete, but let's keep it here for reference.
     * Purpose is valid when we artificially cap the max std id to less than 4.
     */
    if (!fHWVirtExEnabled)
    {
        Assert(    pVM->cpum.s.aGuestCpuIdStd[4].eax == 0
               ||  pVM->cpum.s.aGuestCpuIdStd[0].eax < 0x4);
        pVM->cpum.s.aGuestCpuIdStd[4].eax = 0;
    }
}

/**
 * Terminates the CPUM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3DECL(int) CPUMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU   pVCpu = &pVM->aCpus[i];
        PCPUMCTX pCtx  = CPUMQueryGuestCtxPtr(pVCpu);

        memset(pVCpu->cpum.s.aMagic, 0, sizeof(pVCpu->cpum.s.aMagic));
        pVCpu->cpum.s.uMagic = 0;
        pCtx->dr[5] = 0;
    }
#else
    NOREF(pVM);
#endif
    return VINF_SUCCESS;
}


/**
 * Resets a virtual CPU.
 *
 * Used by CPUMR3Reset and CPU hot plugging.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMR3DECL(void) CPUMR3ResetCpu(PVMCPU pVCpu)
{
    /** @todo anything different for VCPU > 0? */
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);

    /*
     * Initialize everything to ZERO first.
     */
    uint32_t fUseFlags = pVCpu->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM;
    memset(pCtx, 0, sizeof(*pCtx));
    pVCpu->cpum.s.fUseFlags = fUseFlags;

    pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET; //0x60000010
    pCtx->eip = 0x0000fff0;
    pCtx->edx = 0x00000600; /* P6 processor */
    pCtx->eflags.Bits.u1Reserved0 = 1;

    pCtx->cs.Sel      = 0xf000;
    pCtx->cs.ValidSel = 0xf000;
    pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->cs.u64Base  = UINT64_C(0xffff0000);
    pCtx->cs.u32Limit = 0x0000ffff;
    pCtx->cs.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->cs.Attr.n.u1Present  = 1;
    pCtx->cs.Attr.n.u4Type     = X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;

    pCtx->ds.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->ds.u32Limit = 0x0000ffff;
    pCtx->ds.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->ds.Attr.n.u1Present  = 1;
    pCtx->ds.Attr.n.u4Type     = X86_SEL_TYPE_RW;

    pCtx->es.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->es.u32Limit = 0x0000ffff;
    pCtx->es.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->es.Attr.n.u1Present  = 1;
    pCtx->es.Attr.n.u4Type     = X86_SEL_TYPE_RW;

    pCtx->fs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->fs.u32Limit = 0x0000ffff;
    pCtx->fs.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->fs.Attr.n.u1Present  = 1;
    pCtx->fs.Attr.n.u4Type     = X86_SEL_TYPE_RW;

    pCtx->gs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->gs.u32Limit = 0x0000ffff;
    pCtx->gs.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->gs.Attr.n.u1Present  = 1;
    pCtx->gs.Attr.n.u4Type     = X86_SEL_TYPE_RW;

    pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->ss.u32Limit = 0x0000ffff;
    pCtx->ss.Attr.n.u1Present  = 1;
    pCtx->ss.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->ss.Attr.n.u4Type     = X86_SEL_TYPE_RW;

    pCtx->idtr.cbIdt = 0xffff;
    pCtx->gdtr.cbGdt = 0xffff;

    pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->ldtr.u32Limit = 0xffff;
    pCtx->ldtr.Attr.n.u1Present = 1;
    pCtx->ldtr.Attr.n.u4Type    = X86_SEL_TYPE_SYS_LDT;

    pCtx->tr.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->tr.u32Limit = 0xffff;
    pCtx->tr.Attr.n.u1Present = 1;
    pCtx->tr.Attr.n.u4Type    = X86_SEL_TYPE_SYS_386_TSS_BUSY; /* Deduction, not properly documented by Intel. */

    pCtx->dr[6] = X86_DR6_INIT_VAL;
    pCtx->dr[7] = X86_DR7_INIT_VAL;
1399
1400 pCtx->fpu.FTW = 0x00; /* All empty (abridged tag reg edition). */
1401 pCtx->fpu.FCW = 0x37f;
1402
1403 /* Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3A, Table 8-1.
1404 IA-32 Processor States Following Power-up, Reset, or INIT */
1405 pCtx->fpu.MXCSR = 0x1F80;
1406 pCtx->fpu.MXCSR_MASK = 0xffff; /** @todo REM always changed this for us. Should probably check if the HW really
1407 supports all bits, since a zero value here should be read as 0xffbf. */
1408
1409 /* Init PAT MSR */
1410 pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */
1411
1412 /* Reset EFER; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State
1413 * The Intel docs don't mention it.
1414 */
1415 pCtx->msrEFER = 0;
1416}
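
#if 0 /* Illustrative sketch (not part of the original file): decoding two of
         the reset values used above. FCW=0x37f masks all x87 exceptions and
         selects 64-bit precision with round-to-nearest; each byte of the
         default PAT value selects the memory type for one PAT entry
         (0x06=WB, 0x04=WT, 0x07=UC-, 0x00=UC). */
static void cpumR3SketchDecodeResetValues(void)
{
    uint16_t const fcw       = 0x37f;
    unsigned const fExcMask  = fcw & 0x3f;        /* Bits 0-5: IM,DM,ZM,OM,UM,PM all masked. */
    unsigned const uPrec     = (fcw >> 8) & 3;    /* 3 = 64-bit (double extended) precision. */
    unsigned const uRounding = (fcw >> 10) & 3;   /* 0 = round to nearest (even). */

    uint64_t const uPat = UINT64_C(0x0007040600070406);
    for (unsigned iEntry = 0; iEntry < 8; iEntry++)
    {
        uint8_t const uType = (uPat >> (iEntry * 8)) & 0xff; /* PA0=WB, PA1=WT, PA2=UC-, PA3=UC, then repeated. */
        NOREF(uType);
    }
    NOREF(fExcMask); NOREF(uPrec); NOREF(uRounding);
}
#endif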
1417
1418
1419/**
1420 * Resets the CPU.
1421 *
1423 * @param pVM Pointer to the VM.
1424 */
1425VMMR3DECL(void) CPUMR3Reset(PVM pVM)
1426{
1427 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1428 {
1429 CPUMR3ResetCpu(&pVM->aCpus[i]);
1430
1431#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1432 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(&pVM->aCpus[i]);
1433
1434 /* Magic marker for searching in crash dumps. */
1435 strcpy((char *)pVM->aCpus[i].cpum.s.aMagic, "CPUMCPU Magic");
1436 pVM->aCpus[i].cpum.s.uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
1437 pCtx->dr[5] = UINT64_C(0xDEADBEEFDEADBEEF);
1438#endif
1439 }
1440}
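
#if 0 /* Illustrative sketch (not part of the original file): how a crash dump
         analysis tool might locate the CPUMCPU state by scanning for the
         "CPUMCPU Magic" marker written above. */
static const uint8_t *cpumR3SketchFindCrashMagic(const uint8_t *pbDump, size_t cbDump)
{
    static const char s_szMagic[] = "CPUMCPU Magic"; /* Includes the terminator, like the strcpy above. */
    if (cbDump >= sizeof(s_szMagic))
        for (size_t off = 0; off <= cbDump - sizeof(s_szMagic); off++)
            if (!memcmp(&pbDump[off], s_szMagic, sizeof(s_szMagic)))
                return &pbDump[off];
    return NULL;
}
#endif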
1441
1442
1443/**
1444 * Saves the guest CPUID leaves; called both in pass 0 and the final pass.
1445 *
1446 * @param pVM Pointer to the VM.
1447 * @param pSSM The saved state handle.
1448 */
1449static void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM)
1450{
1451 /*
1452 * Save all the CPU ID leaves here so we can check them for compatibility
1453 * upon loading.
1454 */
1455 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd));
1456 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));
1457
1458 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt));
1459 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
1460
1461 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur));
1462 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
1463
1464 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
1465
1466 /*
1467 * Save a good portion of the raw CPU IDs as well, as they may come in
1468 * handy when validating features for raw mode.
1469 */
1470 CPUMCPUID aRawStd[16];
1471 for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
1472 ASMCpuId(i, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
1473 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
1474 SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
1475
1476 CPUMCPUID aRawExt[32];
1477 for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
1478 ASMCpuId(i | UINT32_C(0x80000000), &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
1479 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
1480 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
1481}
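
#if 0 /* Illustrative sketch (not part of the original file): the resulting
         on-disk layout shown as a pseudo-struct. Every array is prefixed by
         its element count so newer VBox versions can grow the arrays and the
         load code can pad the difference (array sizes here are illustrative). */
struct CPUMSKETCHCPUIDLAYOUT
{
    uint32_t   cStdLeaves;
    CPUMCPUID  aStdLeaves[6];       /* Guest standard leaves. */
    uint32_t   cExtLeaves;
    CPUMCPUID  aExtLeaves[10];      /* Guest extended leaves. */
    uint32_t   cCentaurLeaves;
    CPUMCPUID  aCentaurLeaves[4];   /* Guest Centaur/VIA leaves. */
    CPUMCPUID  GuestCpuIdDef;       /* Default leaf, no count prefix. */
    uint32_t   cRawStd;
    CPUMCPUID  aRawStd[16];         /* Raw host standard leaves. */
    uint32_t   cRawExt;
    CPUMCPUID  aRawExt[32];         /* Raw host extended leaves. */
};
#endif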
1482
1483
1484/**
1485 * Loads the CPU ID leaves saved by pass 0.
1486 *
1487 * @returns VBox status code.
1488 * @param pVM Pointer to the VM.
1489 * @param pSSM The saved state handle.
1490 * @param uVersion The format version.
1491 */
1492static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
1493{
1494 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
1495
1496 /*
1497 * Define a bunch of macros for simplifying the code.
1498 */
1499 /* Generic expression + failure message. */
1500#define CPUID_CHECK_RET(expr, fmt) \
1501 do { \
1502 if (!(expr)) \
1503 { \
1504 char *pszMsg = RTStrAPrintf2 fmt; /* lack of variadic macros sucks */ \
1505 if (fStrictCpuIdChecks) \
1506 { \
1507 int rcCpuid = SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, "%s", pszMsg); \
1508 RTStrFree(pszMsg); \
1509 return rcCpuid; \
1510 } \
1511 LogRel(("CPUM: %s\n", pszMsg)); \
1512 RTStrFree(pszMsg); \
1513 } \
1514 } while (0)
1515#define CPUID_CHECK_WRN(expr, fmt) \
1516 do { \
1517 if (!(expr)) \
1518 LogRel(fmt); \
1519 } while (0)
1520
1521 /* For comparing two values and bitching if they differ. */
1522#define CPUID_CHECK2_RET(what, host, saved) \
1523 do { \
1524 if ((host) != (saved)) \
1525 { \
1526 if (fStrictCpuIdChecks) \
1527 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
1528 N_(#what " mismatch: host=%#x saved=%#x"), (host), (saved)); \
1529 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
1530 } \
1531 } while (0)
1532#define CPUID_CHECK2_WRN(what, host, saved) \
1533 do { \
1534 if ((host) != (saved)) \
1535 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
1536 } while (0)
1537
1538 /* For checking raw CPU features (raw mode). */
1539#define CPUID_RAW_FEATURE_RET(set, reg, bit) \
1540 do { \
1541 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
1542 { \
1543 if (fStrictCpuIdChecks) \
1544 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
1545 N_(#bit " mismatch: host=%d saved=%d"), \
1546 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) ); \
1547 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
1548 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
1549 } \
1550 } while (0)
1551#define CPUID_RAW_FEATURE_WRN(set, reg, bit) \
1552 do { \
1553 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
1554 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
1555 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
1556 } while (0)
1557#define CPUID_RAW_FEATURE_IGN(set, reg, bit) do { } while (0)
1558
1559 /* For checking guest features. */
1560#define CPUID_GST_FEATURE_RET(set, reg, bit) \
1561 do { \
1562 if ( (aGuestCpuId##set [1].reg & bit) \
1563 && !(aHostRaw##set [1].reg & bit) \
1564 && !(aHostOverride##set [1].reg & bit) \
1565 && !(aGuestOverride##set [1].reg & bit) \
1566 ) \
1567 { \
1568 if (fStrictCpuIdChecks) \
1569 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
1570                                         N_(#bit " is not supported by the host but has already been exposed to the guest")); \
1571             LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
1572 } \
1573 } while (0)
1574#define CPUID_GST_FEATURE_WRN(set, reg, bit) \
1575 do { \
1576 if ( (aGuestCpuId##set [1].reg & bit) \
1577 && !(aHostRaw##set [1].reg & bit) \
1578 && !(aHostOverride##set [1].reg & bit) \
1579 && !(aGuestOverride##set [1].reg & bit) \
1580 ) \
1581             LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
1582 } while (0)
1583#define CPUID_GST_FEATURE_EMU(set, reg, bit) \
1584 do { \
1585 if ( (aGuestCpuId##set [1].reg & bit) \
1586 && !(aHostRaw##set [1].reg & bit) \
1587 && !(aHostOverride##set [1].reg & bit) \
1588 && !(aGuestOverride##set [1].reg & bit) \
1589 ) \
1590 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
1591 } while (0)
1592#define CPUID_GST_FEATURE_IGN(set, reg, bit) do { } while (0)
1593
1594 /* For checking guest features if the guest is an AMD CPU. */
1595#define CPUID_GST_AMD_FEATURE_RET(set, reg, bit) \
1596 do { \
1597 if ( (aGuestCpuId##set [1].reg & bit) \
1598 && fGuestAmd \
1599 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
1600 && !(aHostOverride##set [1].reg & bit) \
1601 && !(aGuestOverride##set [1].reg & bit) \
1602 ) \
1603 { \
1604 if (fStrictCpuIdChecks) \
1605 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
1606                                         N_(#bit " is not supported by the host but has already been exposed to the guest")); \
1607             LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
1608 } \
1609 } while (0)
1610#define CPUID_GST_AMD_FEATURE_WRN(set, reg, bit) \
1611 do { \
1612 if ( (aGuestCpuId##set [1].reg & bit) \
1613 && fGuestAmd \
1614 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
1615 && !(aHostOverride##set [1].reg & bit) \
1616 && !(aGuestOverride##set [1].reg & bit) \
1617 ) \
1618             LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
1619 } while (0)
1620#define CPUID_GST_AMD_FEATURE_EMU(set, reg, bit) \
1621 do { \
1622 if ( (aGuestCpuId##set [1].reg & bit) \
1623 && fGuestAmd \
1624 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
1625 && !(aHostOverride##set [1].reg & bit) \
1626 && !(aGuestOverride##set [1].reg & bit) \
1627 ) \
1628 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
1629 } while (0)
1630#define CPUID_GST_AMD_FEATURE_IGN(set, reg, bit) do { } while (0)
1631
1632 /* For checking AMD features which have a corresponding bit in the standard
1633 range. (Intel defines very few bits in the extended feature sets.) */
1634#define CPUID_GST_FEATURE2_RET(reg, ExtBit, StdBit) \
1635 do { \
1636 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
1637 && !(fHostAmd \
1638 ? aHostRawExt[1].reg & (ExtBit) \
1639 : aHostRawStd[1].reg & (StdBit)) \
1640 && !(aHostOverrideExt[1].reg & (ExtBit)) \
1641 && !(aGuestOverrideExt[1].reg & (ExtBit)) \
1642 ) \
1643 { \
1644 if (fStrictCpuIdChecks) \
1645 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
1646                                         N_(#ExtBit " is not supported by the host but has already been exposed to the guest")); \
1647             LogRel(("CPUM: " #ExtBit " is not supported by the host but has already been exposed to the guest\n")); \
1648 } \
1649 } while (0)
1650#define CPUID_GST_FEATURE2_WRN(reg, ExtBit, StdBit) \
1651 do { \
1652 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
1653 && !(fHostAmd \
1654 ? aHostRawExt[1].reg & (ExtBit) \
1655 : aHostRawStd[1].reg & (StdBit)) \
1656 && !(aHostOverrideExt[1].reg & (ExtBit)) \
1657 && !(aGuestOverrideExt[1].reg & (ExtBit)) \
1658 ) \
1659             LogRel(("CPUM: " #ExtBit " is not supported by the host but has already been exposed to the guest\n")); \
1660 } while (0)
1661#define CPUID_GST_FEATURE2_EMU(reg, ExtBit, StdBit) \
1662 do { \
1663 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
1664 && !(fHostAmd \
1665 ? aHostRawExt[1].reg & (ExtBit) \
1666 : aHostRawStd[1].reg & (StdBit)) \
1667 && !(aHostOverrideExt[1].reg & (ExtBit)) \
1668 && !(aGuestOverrideExt[1].reg & (ExtBit)) \
1669 ) \
1670 LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
1671 } while (0)
1672#define CPUID_GST_FEATURE2_IGN(reg, ExtBit, StdBit) do { } while (0)
1673
1674 /*
1675 * Load them into stack buffers first.
1676 */
1677 CPUMCPUID aGuestCpuIdStd[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)];
1678 uint32_t cGuestCpuIdStd;
1679 int rc = SSMR3GetU32(pSSM, &cGuestCpuIdStd); AssertRCReturn(rc, rc);
1680 if (cGuestCpuIdStd > RT_ELEMENTS(aGuestCpuIdStd))
1681 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1682 SSMR3GetMem(pSSM, &aGuestCpuIdStd[0], cGuestCpuIdStd * sizeof(aGuestCpuIdStd[0]));
1683
1684 CPUMCPUID aGuestCpuIdExt[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)];
1685 uint32_t cGuestCpuIdExt;
1686 rc = SSMR3GetU32(pSSM, &cGuestCpuIdExt); AssertRCReturn(rc, rc);
1687 if (cGuestCpuIdExt > RT_ELEMENTS(aGuestCpuIdExt))
1688 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1689 SSMR3GetMem(pSSM, &aGuestCpuIdExt[0], cGuestCpuIdExt * sizeof(aGuestCpuIdExt[0]));
1690
1691 CPUMCPUID aGuestCpuIdCentaur[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)];
1692 uint32_t cGuestCpuIdCentaur;
1693 rc = SSMR3GetU32(pSSM, &cGuestCpuIdCentaur); AssertRCReturn(rc, rc);
1694 if (cGuestCpuIdCentaur > RT_ELEMENTS(aGuestCpuIdCentaur))
1695 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1696 SSMR3GetMem(pSSM, &aGuestCpuIdCentaur[0], cGuestCpuIdCentaur * sizeof(aGuestCpuIdCentaur[0]));
1697
1698 CPUMCPUID GuestCpuIdDef;
1699 rc = SSMR3GetMem(pSSM, &GuestCpuIdDef, sizeof(GuestCpuIdDef));
1700 AssertRCReturn(rc, rc);
1701
1702 CPUMCPUID aRawStd[16];
1703 uint32_t cRawStd;
1704 rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc);
1705 if (cRawStd > RT_ELEMENTS(aRawStd))
1706 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1707 SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
1708
1709 CPUMCPUID aRawExt[32];
1710 uint32_t cRawExt;
1711 rc = SSMR3GetU32(pSSM, &cRawExt); AssertRCReturn(rc, rc);
1712 if (cRawExt > RT_ELEMENTS(aRawExt))
1713 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1714 rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
1715 AssertRCReturn(rc, rc);
1716
1717 /*
1718 * Note that we support restoring fewer than the current number of standard
1719 * leaves because we've been allowed more in newer versions of VBox.
1720 *
1721 * So, pad new entries with the default.
1722 */
1723 for (uint32_t i = cGuestCpuIdStd; i < RT_ELEMENTS(aGuestCpuIdStd); i++)
1724 aGuestCpuIdStd[i] = GuestCpuIdDef;
1725
1726 for (uint32_t i = cGuestCpuIdExt; i < RT_ELEMENTS(aGuestCpuIdExt); i++)
1727 aGuestCpuIdExt[i] = GuestCpuIdDef;
1728
1729 for (uint32_t i = cGuestCpuIdCentaur; i < RT_ELEMENTS(aGuestCpuIdCentaur); i++)
1730 aGuestCpuIdCentaur[i] = GuestCpuIdDef;
1731
1732 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
1733 ASMCpuId(i, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
1734
1735 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
1736 ASMCpuId(i | UINT32_C(0x80000000), &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
1737
1738 /*
1739 * Get the raw CPU IDs for the current host.
1740 */
1741 CPUMCPUID aHostRawStd[16];
1742 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
1743 ASMCpuId(i, &aHostRawStd[i].eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx);
1744
1745 CPUMCPUID aHostRawExt[32];
1746 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
1747 ASMCpuId(i | UINT32_C(0x80000000), &aHostRawExt[i].eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx);
1748
1749 /*
1750 * Get the host and guest overrides so we don't reject the state because
1751 * some feature was enabled through these interfaces.
1752 * Note! We currently only need the feature leaves, so we skip the rest.
1753 */
1754 PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/CPUID");
1755 CPUMCPUID aGuestOverrideStd[2];
1756 memcpy(&aGuestOverrideStd[0], &aHostRawStd[0], sizeof(aGuestOverrideStd));
1757 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aGuestOverrideStd[0], RT_ELEMENTS(aGuestOverrideStd), pOverrideCfg);
1758
1759 CPUMCPUID aGuestOverrideExt[2];
1760 memcpy(&aGuestOverrideExt[0], &aHostRawExt[0], sizeof(aGuestOverrideExt));
1761 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aGuestOverrideExt[0], RT_ELEMENTS(aGuestOverrideExt), pOverrideCfg);
1762
1763 pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
1764 CPUMCPUID aHostOverrideStd[2];
1765 memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
1766 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aHostOverrideStd[0], RT_ELEMENTS(aHostOverrideStd), pOverrideCfg);
1767
1768 CPUMCPUID aHostOverrideExt[2];
1769 memcpy(&aHostOverrideExt[0], &aHostRawExt[0], sizeof(aHostOverrideExt));
1770 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aHostOverrideExt[0], RT_ELEMENTS(aHostOverrideExt), pOverrideCfg);
1771
1772 /*
1773 * Query whether the strict CPUID checks below can be skipped.
1774 */
1775 bool fStrictCpuIdChecks;
1776 CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"), "StrictCpuIdChecks", &fStrictCpuIdChecks, true);
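    /* Illustrative note (not original code): being a CFGM knob, this can
       presumably be flipped from the host shell via extradata, along the
       lines of (path assumed from the query above):
           VBoxManage setextradata <vm> "VBoxInternal/CPUM/StrictCpuIdChecks" 0 */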
1777
1778
1779
1780 /*
1781 * For raw-mode we'll require that the CPUs are very similar since we don't
1782 * intercept CPUID instructions for user mode applications.
1783 */
1784 if (!HWACCMIsEnabled(pVM))
1785 {
1786 /* CPUID(0) */
1787 CPUID_CHECK_RET( aHostRawStd[0].ebx == aRawStd[0].ebx
1788 && aHostRawStd[0].ecx == aRawStd[0].ecx
1789 && aHostRawStd[0].edx == aRawStd[0].edx,
1790 (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
1791 &aHostRawStd[0].ebx, &aHostRawStd[0].edx, &aHostRawStd[0].ecx,
1792 &aRawStd[0].ebx, &aRawStd[0].edx, &aRawStd[0].ecx));
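
#if 0   /* Illustrative sketch (not original code): the vendor check above
           compares the 12 vendor string bytes, which are simply CPUID(0)
           EBX, EDX, ECX in that order, e.g. "GenuineIntel"/"AuthenticAMD". */
        char szVendor[13];
        memcpy(&szVendor[0], &aHostRawStd[0].ebx, 4);
        memcpy(&szVendor[4], &aHostRawStd[0].edx, 4);
        memcpy(&szVendor[8], &aHostRawStd[0].ecx, 4);
        szVendor[12] = '\0';
#endif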
1793 CPUID_CHECK2_WRN("Std CPUID max leaf", aHostRawStd[0].eax, aRawStd[0].eax);
1794 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawStd[1].eax >> 14) & 3, (aRawStd[1].eax >> 14) & 3);
1795 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawStd[1].eax >> 28, aRawStd[1].eax >> 28);
1796
1797 bool const fIntel = ASMIsIntelCpuEx(aRawStd[0].ebx, aRawStd[0].ecx, aRawStd[0].edx);
1798
1799 /* CPUID(1).eax */
1800 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawStd[1].eax), ASMGetCpuFamily(aRawStd[1].eax));
1801 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawStd[1].eax, fIntel), ASMGetCpuModel(aRawStd[1].eax, fIntel));
1802 CPUID_CHECK2_WRN("CPU type", (aHostRawStd[1].eax >> 12) & 3, (aRawStd[1].eax >> 12) & 3 );
1803
1804 /* CPUID(1).ebx - completely ignore CPU count and APIC ID. */
1805 CPUID_CHECK2_RET("CPU brand ID", aHostRawStd[1].ebx & 0xff, aRawStd[1].ebx & 0xff);
1806 CPUID_CHECK2_WRN("CLFLUSH chunk count", (aHostRawStd[1].ebx >> 8) & 0xff, (aRawStd[1].ebx >> 8) & 0xff);
1807
1808 /* CPUID(1).ecx */
1809 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);
1810 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL);
1811 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64);
1812 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
1813 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS);
1814 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_VMX);
1815 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_SMX);
1816 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_EST);
1817 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TM2);
1818 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3);
1819 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID);
1820 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
1821 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA);
1822 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16);
1823 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);
1824 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM);
1825 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
1826 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(17) /*reserved*/);
1827 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DCA);
1828 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1);
1829 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2);
1830 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
1831 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE);
1832 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT);
1833 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(24) /*reserved*/);
1834 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES);
1835 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE);
1836 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE);
1837 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX);
1838 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(29) /*reserved*/);
1839 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(30) /*reserved*/);
1840 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_HVP);
1841
1842 /* CPUID(1).edx */
1843 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
1844 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
1845 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE);
1846 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
1847 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC);
1848 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR);
1849 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
1850 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
1851 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8);
1852 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
1853 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
1854 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
1855 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
1856 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
1857 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
1858 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV);
1859 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
1860 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
1861 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
1862 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH);
1863 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
1864 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_DS);
1865 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_ACPI);
1866 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX);
1867 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR);
1868 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE);
1869 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2);
1870 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SS);
1871 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_HTT);
1872 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_TM);
1873 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/);
1874 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PBE);
1875
1876 /* CPUID(2) - config, mostly about caches. ignore. */
1877 /* CPUID(3) - processor serial number. ignore. */
1878 /* CPUID(4) - config, cache and topology - takes ECX as input. ignore. */
1879 /* CPUID(5) - mwait/monitor config. ignore. */
1880 /* CPUID(6) - power management. ignore. */
1881 /* CPUID(7) - ???. ignore. */
1882 /* CPUID(8) - ???. ignore. */
1883 /* CPUID(9) - DCA. ignore for now. */
1884 /* CPUID(a) - PeMo info. ignore for now. */
1885 /* CPUID(b) - topology info - takes ECX as input. ignore. */
1886
1887 /* CPUID(d) - XCR0 stuff - takes ECX as input. We only warn about the main level (ECX=0) for now. */
1888 CPUID_CHECK_WRN( aRawStd[0].eax < UINT32_C(0x0000000d)
1889 || aHostRawStd[0].eax >= UINT32_C(0x0000000d),
1890 ("CPUM: Standard leaf D was present on saved state host, not present on current.\n"));
1891 if ( aRawStd[0].eax >= UINT32_C(0x0000000d)
1892 && aHostRawStd[0].eax >= UINT32_C(0x0000000d))
1893 {
1894 CPUID_CHECK2_WRN("Valid low XCR0 bits", aHostRawStd[0xd].eax, aRawStd[0xd].eax);
1895 CPUID_CHECK2_WRN("Valid high XCR0 bits", aHostRawStd[0xd].edx, aRawStd[0xd].edx);
1896 CPUID_CHECK2_WRN("Current XSAVE/XRSTOR area size", aHostRawStd[0xd].ebx, aRawStd[0xd].ebx);
1897 CPUID_CHECK2_WRN("Max XSAVE/XRSTOR area size", aHostRawStd[0xd].ecx, aRawStd[0xd].ecx);
1898 }
1899
1900 /* CPUID(0x80000000) - same as CPUID(0) except for eax.
1901 Note! Intel has been marking many of the fields here as reserved. We
1902 will verify them as if it's an AMD CPU. */
1903 CPUID_CHECK_RET( (aHostRawExt[0].eax >= UINT32_C(0x80000001) && aHostRawExt[0].eax <= UINT32_C(0x8000007f))
1904 || !(aRawExt[0].eax >= UINT32_C(0x80000001) && aRawExt[0].eax <= UINT32_C(0x8000007f)),
1905 (N_("Extended leaves were present on the saved state host, but are missing on the current one\n")));
1906 if (aRawExt[0].eax >= UINT32_C(0x80000001) && aRawExt[0].eax <= UINT32_C(0x8000007f))
1907 {
1908 CPUID_CHECK_RET( aHostRawExt[0].ebx == aRawExt[0].ebx
1909 && aHostRawExt[0].ecx == aRawExt[0].ecx
1910 && aHostRawExt[0].edx == aRawExt[0].edx,
1911 (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
1912 &aHostRawExt[0].ebx, &aHostRawExt[0].edx, &aHostRawExt[0].ecx,
1913 &aRawExt[0].ebx, &aRawExt[0].edx, &aRawExt[0].ecx));
1914 CPUID_CHECK2_WRN("Ext CPUID max leaf", aHostRawExt[0].eax, aRawExt[0].eax);
1915
1916 /* CPUID(0x80000001).eax - same as CPUID(0).eax. */
1917 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawExt[1].eax), ASMGetCpuFamily(aRawExt[1].eax));
1918 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawExt[1].eax, fIntel), ASMGetCpuModel(aRawExt[1].eax, fIntel));
1919 CPUID_CHECK2_WRN("CPU type", (aHostRawExt[1].eax >> 12) & 3, (aRawExt[1].eax >> 12) & 3 );
1920 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3 );
1921 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1].eax >> 28, aRawExt[1].eax >> 28);
1922
1923 /* CPUID(0x80000001).ebx - Brand ID (maybe), just warn if things differs. */
1924 CPUID_CHECK2_WRN("CPU BrandID", aHostRawExt[1].ebx & 0xffff, aRawExt[1].ebx & 0xffff);
1925 CPUID_CHECK2_WRN("Reserved bits 16:27", (aHostRawExt[1].ebx >> 16) & 0xfff, (aRawExt[1].ebx >> 16) & 0xfff);
1926 CPUID_CHECK2_WRN("PkgType", (aHostRawExt[1].ebx >> 28) & 0xf, (aRawExt[1].ebx >> 28) & 0xf);
1927
1928 /* CPUID(0x80000001).ecx */
1929 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF);
1930 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);
1931 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);
1932 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);
1933 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L);
1934 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM);
1935 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);
1936 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);
1937 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);
1938 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW);
1939 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS);
1940 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5);
1941 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);
1942 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT);
1943 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
1944 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
1945 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
1946 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
1947 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
1948 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
1949 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
1950 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
1951 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
1952 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
1953 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
1954 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
1955 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
1956 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
1957 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
1958 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
1959 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
1960 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));
1961
1962 /* CPUID(0x80000001).edx */
1963 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FPU);
1964 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_VME);
1965 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_DE);
1966 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE);
1967 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_TSC);
1968 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MSR);
1969 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAE);
1970 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCE);
1971 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CX8);
1972 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_APIC);
1973 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(10) /*reserved*/);
1974 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_SEP);
1975 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MTRR);
1976 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PGE);
1977 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCA);
1978 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CMOV);
1979 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAT);
1980 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE36);
1981 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(18) /*reserved*/);
1982 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(19) /*reserved*/);
1983 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_NX);
1984 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(21) /*reserved*/);
1985 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
1986 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MMX);
1987 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FXSR);
1988 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
1989 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAGE1GB);
1990 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
1991 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(28) /*reserved*/);
1992 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
1993 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
1994 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
1995
1996 /** @todo verify the rest as well. */
1997 }
1998 }
1999
2000
2001
2002 /*
2003 * Verify that we can support the features already exposed to the guest on
2004 * this host.
2005 *
2006 * Most of the features we're emulating require intercepting instructions
2007 * and doing things the slow way, so there is no need to warn when they aren't
2008 * present in the host CPU. Thus we use IGN instead of EMU on these.
2009 *
2010 * Trailing comments:
2011 * "EMU" - Possible to emulate, could be lots of work and very slow.
2012 * "EMU?" - Can this be emulated?
2013 */
2014 /* CPUID(1).ecx */
2015 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU
2016 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL); // -> EMU?
2017 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64); // -> EMU?
2018 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
2019 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS); // -> EMU?
2020 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_VMX); // -> EMU
2021 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SMX); // -> EMU
2022 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_EST); // -> EMU
2023 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TM2); // -> EMU?
2024 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3); // -> EMU
2025 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID); // -> EMU
2026 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
2027 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA); // -> EMU? what's this?
2028 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16); // -> EMU?
2029 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
2030 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM); // -> EMU
2031 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
2032 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(17) /*reserved*/);
2033 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DCA); // -> EMU?
2034 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1); // -> EMU
2035 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2); // -> EMU
2036 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
2037 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE); // -> EMU
2038 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT); // -> EMU
2039 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(24) /*reserved*/);
2040 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES); // -> EMU
2041 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE); // -> EMU
2042 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU
2043 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX); // -> EMU?
2044 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(29) /*reserved*/);
2045 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(30) /*reserved*/);
2046 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_HVP); // Normally not set by host
2047
2048 /* CPUID(1).edx */
2049 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
2050 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
2051 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE); // -> EMU?
2052 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
2053 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
2054 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
2055 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
2056 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
2057 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
2058 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
2059 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
2060 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
2061 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
2062 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
2063 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
2064 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
2065 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
2066 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
2067 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
2068 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH); // -> EMU
2069 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
2070 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DS); // -> EMU?
2071 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_ACPI); // -> EMU?
2072 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
2073 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
2074 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE); // -> EMU
2075 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2); // -> EMU
2076 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SS); // -> EMU?
2077 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_HTT); // -> EMU?
2078 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TM); // -> EMU?
2079 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/); // -> EMU
2080 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PBE); // -> EMU?
2081
2082 /* CPUID(0x80000000). */
2083 if ( aGuestCpuIdExt[0].eax >= UINT32_C(0x80000001)
2084 && aGuestCpuIdExt[0].eax < UINT32_C(0x8000007f))
2085 {
2086 /** @todo deal with no 0x80000001 on the host. */
2087 bool const fHostAmd = ASMIsAmdCpuEx(aHostRawStd[0].ebx, aHostRawStd[0].ecx, aHostRawStd[0].edx);
2088 bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].ebx, aGuestCpuIdExt[0].ecx, aGuestCpuIdExt[0].edx);
2089
2090 /* CPUID(0x80000001).ecx */
2091 CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF); // -> EMU
2092 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL); // -> EMU
2093 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM); // -> EMU
2094 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???
2095 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L); // -> EMU
2096 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM); // -> EMU
2097 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); // -> EMU
2098 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
2099 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU
2100 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW); // -> EMU?
2101 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS); // -> EMU
2102 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5); // -> EMU
2103 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); // -> EMU
2104 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT); // -> EMU
2105 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
2106 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
2107 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
2108 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
2109 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
2110 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
2111 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
2112 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
2113 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
2114 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
2115 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
2116 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
2117 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
2118 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
2119 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
2120 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
2121 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
2122 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));
2123
2124 /* CPUID(0x80000001).edx */
2125 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_FPU, X86_CPUID_FEATURE_EDX_FPU); // -> EMU
2126 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_VME, X86_CPUID_FEATURE_EDX_VME); // -> EMU
2127 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_DE, X86_CPUID_FEATURE_EDX_DE); // -> EMU
2128 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PSE, X86_CPUID_FEATURE_EDX_PSE);
2129 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_TSC, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
2130 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_MSR, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
2131 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_PAE, X86_CPUID_FEATURE_EDX_PAE);
2132 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MCE, X86_CPUID_FEATURE_EDX_MCE);
2133 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_CX8, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
2134 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_APIC, X86_CPUID_FEATURE_EDX_APIC);
2135 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(10) /*reserved*/);
2136 CPUID_GST_FEATURE_IGN( Ext, edx, X86_CPUID_AMD_FEATURE_EDX_SEP); // Intel: long mode only.
2137 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MTRR, X86_CPUID_FEATURE_EDX_MTRR);
2138 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PGE, X86_CPUID_FEATURE_EDX_PGE);
2139 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MCA, X86_CPUID_FEATURE_EDX_MCA);
2140 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_CMOV, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
2141 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PAT, X86_CPUID_FEATURE_EDX_PAT);
2142 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
2143 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(18) /*reserved*/);
2144 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(19) /*reserved*/);
2145 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_AMD_FEATURE_EDX_NX);
2146 CPUID_GST_FEATURE_WRN( Ext, edx, RT_BIT_32(21) /*reserved*/);
2147 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
2148 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_MMX, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
2149 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_FXSR, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
2150 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
2151 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAGE1GB);
2152 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
2153 CPUID_GST_FEATURE_IGN( Ext, edx, RT_BIT_32(28) /*reserved*/);
2154 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
2155 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
2156 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
2157 }
2158
2159 /*
2160 * We're good, commit the CPU ID leaves.
2161 */
2162 memcpy(&pVM->cpum.s.aGuestCpuIdStd[0], &aGuestCpuIdStd[0], sizeof(aGuestCpuIdStd));
2163 memcpy(&pVM->cpum.s.aGuestCpuIdExt[0], &aGuestCpuIdExt[0], sizeof(aGuestCpuIdExt));
2164 memcpy(&pVM->cpum.s.aGuestCpuIdCentaur[0], &aGuestCpuIdCentaur[0], sizeof(aGuestCpuIdCentaur));
2165 pVM->cpum.s.GuestCpuIdDef = GuestCpuIdDef;
2166
2167#undef CPUID_CHECK_RET
2168#undef CPUID_CHECK_WRN
2169#undef CPUID_CHECK2_RET
2170#undef CPUID_CHECK2_WRN
2171#undef CPUID_RAW_FEATURE_RET
2172#undef CPUID_RAW_FEATURE_WRN
2173#undef CPUID_RAW_FEATURE_IGN
2174#undef CPUID_GST_FEATURE_RET
2175#undef CPUID_GST_FEATURE_WRN
2176#undef CPUID_GST_FEATURE_EMU
2177#undef CPUID_GST_FEATURE_IGN
2178#undef CPUID_GST_FEATURE2_RET
2179#undef CPUID_GST_FEATURE2_WRN
2180#undef CPUID_GST_FEATURE2_EMU
2181#undef CPUID_GST_FEATURE2_IGN
2182#undef CPUID_GST_AMD_FEATURE_RET
2183#undef CPUID_GST_AMD_FEATURE_WRN
2184#undef CPUID_GST_AMD_FEATURE_EMU
2185#undef CPUID_GST_AMD_FEATURE_IGN
2186
2187 return VINF_SUCCESS;
2188}
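
#if 0 /* Illustrative sketch (not part of the original file): what one of the
         CPUID_GST_FEATURE_RET checks boils down to after macro expansion,
         written out with plain ifs for the SSE3 bit. The parameter names are
         made up; in the function above these are local stack buffers. */
static int cpumR3SketchCheckSse3(PSSMHANDLE pSSM, bool fStrictCpuIdChecks,
                                 CPUMCPUID const *paGuestStd, CPUMCPUID const *paHostRawStd,
                                 CPUMCPUID const *paHostOverrideStd, CPUMCPUID const *paGuestOverrideStd)
{
    if (    (paGuestStd[1].ecx         & X86_CPUID_FEATURE_ECX_SSE3)    /* Exposed to the guest... */
        && !(paHostRawStd[1].ecx       & X86_CPUID_FEATURE_ECX_SSE3)    /* ...but absent on this host... */
        && !(paHostOverrideStd[1].ecx  & X86_CPUID_FEATURE_ECX_SSE3)    /* ...and not forced by an override. */
        && !(paGuestOverrideStd[1].ecx & X86_CPUID_FEATURE_ECX_SSE3))
    {
        if (fStrictCpuIdChecks)
            return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
                                     N_("SSE3 is not supported by the host but has already been exposed to the guest"));
        LogRel(("CPUM: SSE3 is not supported by the host but has already been exposed to the guest\n"));
    }
    return VINF_SUCCESS;
}
#endif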
2189
2190
2191/**
2192 * Pass 0 live exec callback.
2193 *
2194 * @returns VINF_SSM_DONT_CALL_AGAIN.
2195 * @param pVM Pointer to the VM.
2196 * @param pSSM The saved state handle.
2197 * @param uPass The pass (0).
2198 */
2199static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
2200{
2201 AssertReturn(uPass == 0, VERR_SSM_UNEXPECTED_PASS);
2202 cpumR3SaveCpuId(pVM, pSSM);
2203 return VINF_SSM_DONT_CALL_AGAIN;
2204}
2205
2206
2207/**
2208 * Execute state save operation.
2209 *
2210 * @returns VBox status code.
2211 * @param pVM Pointer to the VM.
2212 * @param pSSM SSM operation handle.
2213 */
2214static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
2215{
2216 /*
2217 * Save.
2218 */
2219 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2220 {
2221 PVMCPU pVCpu = &pVM->aCpus[i];
2222#ifdef CPUM_WITH_CHANGED_CPUMCTX
2223 SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED,
2224 g_aCpumCtxFields, NULL);
2225#else
2226 SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), SSMSTRUCT_FLAGS_MEM_BAND_AID,
2227 g_aCpumCtxFields, NULL);
2228#endif
2229 }
2230
2231 SSMR3PutU32(pSSM, pVM->cCpus);
2232 SSMR3PutU32(pSSM, sizeof(pVM->aCpus[0].cpum.s.GuestMsrs.msr));
2233 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2234 {
2235 PVMCPU pVCpu = &pVM->aCpus[i];
2236
2237#ifdef CPUM_WITH_CHANGED_CPUMCTX
2238 SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED,
2239 g_aCpumCtxFields, NULL);
2240#else
2241 SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), SSMSTRUCT_FLAGS_MEM_BAND_AID,
2242 g_aCpumCtxFields, NULL);
2243#endif
2244 SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags);
2245 SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
2246 AssertCompileSizeAlignment(pVM->aCpus[i].cpum.s.GuestMsrs.msr, sizeof(uint64_t));
2247 SSMR3PutMem(pSSM, &pVCpu->cpum.s.GuestMsrs, sizeof(pVM->aCpus[i].cpum.s.GuestMsrs.msr));
2248 }
2249
2250 cpumR3SaveCpuId(pVM, pSSM);
2251 return VINF_SUCCESS;
2252}
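
#if 0 /* Illustrative sketch (not part of the original file): the general shape
         of an SSM field descriptor table like g_aCpumCtxFields, assuming the
         usual SSMFIELD_ENTRY()/SSMFIELD_ENTRY_TERM() helpers from
         VBox/vmm/ssm.h. The struct and field names are made up. */
typedef struct CPUMSKETCHSTATE
{
    uint64_t u64Rax;
    uint32_t fEFlags;
} CPUMSKETCHSTATE;

static SSMFIELD const g_aSketchFields[] =
{
    SSMFIELD_ENTRY(CPUMSKETCHSTATE, u64Rax),
    SSMFIELD_ENTRY(CPUMSKETCHSTATE, fEFlags),
    SSMFIELD_ENTRY_TERM()
};
#endif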
2253
2254
2255/**
2256 * @copydoc FNSSMINTLOADPREP
2257 */
2258static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
2259{
2260 NOREF(pSSM);
2261 pVM->cpum.s.fPendingRestore = true;
2262 return VINF_SUCCESS;
2263}
2264
2265
2266/**
2267 * @copydoc FNSSMINTLOADEXEC
2268 */
2269static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2270{
2271 /*
2272 * Validate version.
2273 */
2274 if ( uVersion != CPUM_SAVED_STATE_VERSION
2275 && uVersion != CPUM_SAVED_STATE_VERSION_MEM
2276 && uVersion != CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE
2277 && uVersion != CPUM_SAVED_STATE_VERSION_VER3_2
2278 && uVersion != CPUM_SAVED_STATE_VERSION_VER3_0
2279 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR
2280 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_0
2281 && uVersion != CPUM_SAVED_STATE_VERSION_VER1_6)
2282 {
2283 AssertMsgFailed(("cpumR3LoadExec: Invalid version uVersion=%d!\n", uVersion));
2284 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
2285 }
2286
2287 if (uPass == SSM_PASS_FINAL)
2288 {
2289 /*
2290 * Set the size of RTGCPTR for SSMR3GetGCPtr. (Only necessary for
2291 * really old SSM file versions.)
2292 */
2293 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
2294 SSMR3HandleSetGCPtrSize(pSSM, sizeof(RTGCPTR32));
2295 else if (uVersion <= CPUM_SAVED_STATE_VERSION_VER3_0)
2296 SSMR3HandleSetGCPtrSize(pSSM, HC_ARCH_BITS == 32 ? sizeof(RTGCPTR32) : sizeof(RTGCPTR));
2297
2298 PCSSMFIELD paCpumCtxFields = g_aCpumCtxFields;
2299 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
2300 paCpumCtxFields = g_aCpumCtxFieldsV16;
2301 uint32_t fLoad = 0;
2302 if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
2303#ifdef CPUM_WITH_CHANGED_CPUMCTX
2304 fLoad = SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED;
2305#else
2306 fLoad = SSMSTRUCT_FLAGS_MEM_BAND_AID;
2307#endif
2308
2309 /*
2310 * Restore.
2311 */
2312 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2313 {
2314 PVMCPU pVCpu = &pVM->aCpus[i];
2315 uint64_t uCR3 = pVCpu->cpum.s.Hyper.cr3;
2316 uint64_t uRSP = pVCpu->cpum.s.Hyper.rsp; /* see VMMR3Relocate(). */
2317 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), fLoad, paCpumCtxFields, NULL);
2318 pVCpu->cpum.s.Hyper.cr3 = uCR3;
2319 pVCpu->cpum.s.Hyper.rsp = uRSP;
2320 }
2321
2322 if (uVersion >= CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR)
2323 {
2324 uint32_t cCpus;
2325 int rc = SSMR3GetU32(pSSM, &cCpus); AssertRCReturn(rc, rc);
2326 AssertLogRelMsgReturn(cCpus == pVM->cCpus, ("Mismatching CPU counts: saved: %u; configured: %u\n", cCpus, pVM->cCpus),
2327 VERR_SSM_UNEXPECTED_DATA);
2328 }
2329 AssertLogRelMsgReturn( uVersion > CPUM_SAVED_STATE_VERSION_VER2_0
2330 || pVM->cCpus == 1,
2331 ("cCpus=%u\n", pVM->cCpus),
2332 VERR_SSM_UNEXPECTED_DATA);
2333
2334 uint32_t cbMsrs = 0;
2335 if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
2336 {
2337 int rc = SSMR3GetU32(pSSM, &cbMsrs); AssertRCReturn(rc, rc);
2338 AssertLogRelMsgReturn(RT_ALIGN(cbMsrs, sizeof(uint64_t)) == cbMsrs, ("Size of MSRs is misaligned: %#x\n", cbMsrs),
2339 VERR_SSM_UNEXPECTED_DATA);
2340 AssertLogRelMsgReturn(cbMsrs <= sizeof(CPUMCTXMSRS) && cbMsrs > 0, ("Size of MSRs is out of range: %#x\n", cbMsrs),
2341 VERR_SSM_UNEXPECTED_DATA);
2342 }
2343
2344 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2345 {
2346 SSMR3GetStructEx(pSSM, &pVM->aCpus[i].cpum.s.Guest, sizeof(pVM->aCpus[i].cpum.s.Guest), fLoad,
2347 paCpumCtxFields, NULL);
2348 SSMR3GetU32(pSSM, &pVM->aCpus[i].cpum.s.fUseFlags);
2349 SSMR3GetU32(pSSM, &pVM->aCpus[i].cpum.s.fChanged);
2350 if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
2351 SSMR3GetMem(pSSM, &pVM->aCpus[i].cpum.s.GuestMsrs.au64[0], cbMsrs);
2352 else if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_0)
2353 {
2354 SSMR3GetMem(pSSM, &pVM->aCpus[i].cpum.s.GuestMsrs.au64[0], 2 * sizeof(uint64_t)); /* Restore two MSRs. */
2355 SSMR3Skip(pSSM, 62 * sizeof(uint64_t));
2356 }
2357 }
2358
2359 /* Older states do not set CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID for
2360 raw-mode guests, so we have to do it ourselves. */
2361 if ( uVersion <= CPUM_SAVED_STATE_VERSION_VER3_2
2362 && !HWACCMIsEnabled(pVM))
2363 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
2364 pVM->aCpus[iCpu].cpum.s.fChanged |= CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;
2365 }
2366
2367 pVM->cpum.s.fPendingRestore = false;
2368
2369 /*
2370 * Guest CPUIDs.
2371 */
2372 if (uVersion > CPUM_SAVED_STATE_VERSION_VER3_0)
2373 return cpumR3LoadCpuId(pVM, pSSM, uVersion);
2374
2375 /** @todo Merge the code below into cpumR3LoadCpuId when we've found out what is
2376 * actually required. */
2377
2378 /*
2379 * Restore the CPUID leaves.
2380 *
2381 * Note that we support restoring fewer than the current number of standard
2382 * leaves because we've been allowed more in newer versions of VBox.
2383 */
2384 uint32_t cElements;
2385 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
2386 if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
2387 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2388 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdStd[0]));
2389
2390 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
2391 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
2392 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2393 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
2394
2395 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
2396 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
2397 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2398 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
2399
2400 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
2401
2402 /*
2403 * Check that the basic CPUID information is unchanged.
2404 */
2405 /** @todo we should check the 64-bit capabilities too! */
2406 uint32_t au32CpuId[8] = {0,0,0,0, 0,0,0,0};
2407 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
2408 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
2409 uint32_t au32CpuIdSaved[8];
2410 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
2411 if (RT_SUCCESS(rc))
2412 {
2413 /* Ignore CPU stepping. */
2414 au32CpuId[4] &= 0xfffffff0;
2415 au32CpuIdSaved[4] &= 0xfffffff0;
2416
2417 /* Ignore APIC ID (AMD specs). */
2418 au32CpuId[5] &= ~0xff000000;
2419 au32CpuIdSaved[5] &= ~0xff000000;
2420
2421 /* Ignore the number of Logical CPUs (AMD specs). */
2422 au32CpuId[5] &= ~0x00ff0000;
2423 au32CpuIdSaved[5] &= ~0x00ff0000;
2424
2425 /* Ignore some advanced capability bits that we don't expose to the guest. */
2426 au32CpuId[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
2427 | X86_CPUID_FEATURE_ECX_VMX
2428 | X86_CPUID_FEATURE_ECX_SMX
2429 | X86_CPUID_FEATURE_ECX_EST
2430 | X86_CPUID_FEATURE_ECX_TM2
2431 | X86_CPUID_FEATURE_ECX_CNTXID
2432 | X86_CPUID_FEATURE_ECX_TPRUPDATE
2433 | X86_CPUID_FEATURE_ECX_PDCM
2434 | X86_CPUID_FEATURE_ECX_DCA
2435 | X86_CPUID_FEATURE_ECX_X2APIC
2436 );
2437 au32CpuIdSaved[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
2438 | X86_CPUID_FEATURE_ECX_VMX
2439 | X86_CPUID_FEATURE_ECX_SMX
2440 | X86_CPUID_FEATURE_ECX_EST
2441 | X86_CPUID_FEATURE_ECX_TM2
2442 | X86_CPUID_FEATURE_ECX_CNTXID
2443 | X86_CPUID_FEATURE_ECX_TPRUPDATE
2444 | X86_CPUID_FEATURE_ECX_PDCM
2445 | X86_CPUID_FEATURE_ECX_DCA
2446 | X86_CPUID_FEATURE_ECX_X2APIC
2447 );
2448
2449 /* Make sure we don't forget to update the masks when enabling
2450 * features in the future.
2451 */
2452 AssertRelease(!(pVM->cpum.s.aGuestCpuIdStd[1].ecx &
2453 ( X86_CPUID_FEATURE_ECX_DTES64
2454 | X86_CPUID_FEATURE_ECX_VMX
2455 | X86_CPUID_FEATURE_ECX_SMX
2456 | X86_CPUID_FEATURE_ECX_EST
2457 | X86_CPUID_FEATURE_ECX_TM2
2458 | X86_CPUID_FEATURE_ECX_CNTXID
2459 | X86_CPUID_FEATURE_ECX_TPRUPDATE
2460 | X86_CPUID_FEATURE_ECX_PDCM
2461 | X86_CPUID_FEATURE_ECX_DCA
2462 | X86_CPUID_FEATURE_ECX_X2APIC
2463 )));
2464 /* do the compare */
2465 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
2466 {
2467 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
2468 LogRel(("cpumR3LoadExec: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
2469 "Saved=%.*Rhxs\n"
2470 "Real =%.*Rhxs\n",
2471 sizeof(au32CpuIdSaved), au32CpuIdSaved,
2472 sizeof(au32CpuId), au32CpuId));
2473 else
2474 {
2475 LogRel(("cpumR3LoadExec: CpuId mismatch!\n"
2476 "Saved=%.*Rhxs\n"
2477 "Real =%.*Rhxs\n",
2478 sizeof(au32CpuIdSaved), au32CpuIdSaved,
2479 sizeof(au32CpuId), au32CpuId));
2480 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
2481 }
2482 }
2483 }
2484
2485 return rc;
2486}
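
#if 0 /* Illustrative sketch (not part of the original file): the CPUID(1).EAX
         layout that the 0xfffffff0 mask above relies on -- bits 3:0 are the
         stepping, which is deliberately ignored when comparing states. */
static void cpumR3SketchDecodeLeaf1Eax(uint32_t uEax)
{
    unsigned const uStepping  = uEax & 0xf;          /* Bits 3:0, masked out above. */
    unsigned const uModel     = (uEax >>  4) & 0xf;  /* Bits 7:4. */
    unsigned const uFamily    = (uEax >>  8) & 0xf;  /* Bits 11:8. */
    unsigned const uExtModel  = (uEax >> 16) & 0xf;  /* Bits 19:16. */
    unsigned const uExtFamily = (uEax >> 20) & 0xff; /* Bits 27:20. */
    NOREF(uStepping); NOREF(uModel); NOREF(uFamily); NOREF(uExtModel); NOREF(uExtFamily);
}
#endif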
2487
2488
2489/**
2490 * @copydoc FNSSMINTLOADDONE
2491 */
2492static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
2493{
2494 if (RT_FAILURE(SSMR3HandleGetStatus(pSSM)))
2495 return VINF_SUCCESS;
2496
2497 /* just check this since we can. */ /** @todo Add a SSM unit flag for indicating that it's mandatory during a restore. */
2498 if (pVM->cpum.s.fPendingRestore)
2499 {
2500 LogRel(("CPUM: Missing state!\n"));
2501 return VERR_INTERNAL_ERROR_2;
2502 }
2503
2504 /* Notify PGM of the NXE states in case they've changed. */
2505 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
2506 PGMNotifyNxeChanged(&pVM->aCpus[iCpu], !!(pVM->aCpus[iCpu].cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
2507 return VINF_SUCCESS;
2508}
2509
2510
2511/**
2512 * Checks if the CPUM state restore is still pending.
2513 *
2514 * @returns true / false.
2515 * @param pVM Pointer to the VM.
2516 */
2517VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM)
2518{
2519 return pVM->cpum.s.fPendingRestore;
2520}
2521
2522
2523/**
2524 * Formats the EFLAGS value into mnemonics.
2525 *
2526 * @param pszEFlags Where to write the mnemonics. (Assumes sufficient buffer space.)
2527 * @param efl The EFLAGS value.
2528 */
2529static void cpumR3InfoFormatFlags(char *pszEFlags, uint32_t efl)
2530{
2531 /*
2532 * Format the flags.
2533 */
2534 static const struct
2535 {
2536 const char *pszSet; const char *pszClear; uint32_t fFlag;
2537 } s_aFlags[] =
2538 {
2539 { "vip",NULL, X86_EFL_VIP },
2540 { "vif",NULL, X86_EFL_VIF },
2541 { "ac", NULL, X86_EFL_AC },
2542 { "vm", NULL, X86_EFL_VM },
2543 { "rf", NULL, X86_EFL_RF },
2544 { "nt", NULL, X86_EFL_NT },
2545 { "ov", "nv", X86_EFL_OF },
2546 { "dn", "up", X86_EFL_DF },
2547 { "ei", "di", X86_EFL_IF },
2548 { "tf", NULL, X86_EFL_TF },
2549 { "nt", "pl", X86_EFL_SF },
2550 { "nz", "zr", X86_EFL_ZF },
2551 { "ac", "na", X86_EFL_AF },
2552 { "po", "pe", X86_EFL_PF },
2553 { "cy", "nc", X86_EFL_CF },
2554 };
2555 char *psz = pszEFlags;
2556 for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
2557 {
2558 const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
2559 if (pszAdd)
2560 {
2561 strcpy(psz, pszAdd);
2562 psz += strlen(pszAdd);
2563 *psz++ = ' ';
2564 }
2565 }
2566 psz[-1] = '\0';
2567}
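
#if 0 /* Illustrative sketch (not part of the original file): typical use of
         cpumR3InfoFormatFlags. For X86_EFL_IF | X86_EFL_ZF | X86_EFL_PF the
         buffer receives something like "nv up ei pl zr na pe nc". */
static void cpumR3SketchShowFlags(void)
{
    char szEFlags[80];
    cpumR3InfoFormatFlags(szEFlags, X86_EFL_IF | X86_EFL_ZF | X86_EFL_PF);
    Log(("eflags: %s\n", szEFlags));
}
#endif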
2568
2569
2570/**
2571 * Formats a full register dump.
2572 *
2573 * @param pVM Pointer to the VM.
2574 * @param pCtx The context to format.
2575 * @param pCtxCore The context core to format.
2576 * @param pHlp Output functions.
2577 * @param enmType The dump type.
2578 * @param pszPrefix Register name prefix.
2579 */
2580static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCCPUMCTXCORE pCtxCore, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType,
2581 const char *pszPrefix)
2582{
2583 NOREF(pVM);
2584
2585 /*
2586 * Format the EFLAGS.
2587 */
2588 uint32_t efl = pCtxCore->eflags.u32;
2589 char szEFlags[80];
2590 cpumR3InfoFormatFlags(&szEFlags[0], efl);
2591
2592 /*
2593 * Format the registers.
2594 */
2595 switch (enmType)
2596 {
2597 case CPUMDUMPTYPE_TERSE:
2598 if (CPUMIsGuestIn64BitCodeEx(pCtx))
2599 pHlp->pfnPrintf(pHlp,
2600 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
2601 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
2602 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
2603 "%sr14=%016RX64 %sr15=%016RX64\n"
2604 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
2605 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
2606 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
2607 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
2608 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
2609 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2610 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
2611 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, efl);
2612 else
2613 pHlp->pfnPrintf(pHlp,
2614 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
2615 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
2616 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
2617 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
2618 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2619 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
2620 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, efl);
2621 break;
2622
2623 case CPUMDUMPTYPE_DEFAULT:
2624 if (CPUMIsGuestIn64BitCodeEx(pCtx))
2625 pHlp->pfnPrintf(pHlp,
2626 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
2627 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
2628 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
2629 "%sr14=%016RX64 %sr15=%016RX64\n"
2630 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
2631 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
2632 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%016RX64:%04x %sldtr=%04x\n"
2633 ,
2634 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
2635 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
2636 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
2637 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2638 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
2639 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
2640 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
2641 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
2642 else
2643 pHlp->pfnPrintf(pHlp,
2644 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
2645 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
2646 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
2647 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%08RX64:%04x %sldtr=%04x\n"
2648 ,
2649 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
2650 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2651 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
2652 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
2653 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
2654 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
2655 break;
2656
2657 case CPUMDUMPTYPE_VERBOSE:
2658 if (CPUMIsGuestIn64BitCodeEx(pCtx))
2659 pHlp->pfnPrintf(pHlp,
2660 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
2661 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
2662 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
2663 "%sr14=%016RX64 %sr15=%016RX64\n"
2664 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
2665 "%scs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2666 "%sds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2667 "%ses={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2668 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2669 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2670 "%sss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2671 "%scr0=%016RX64 %scr2=%016RX64 %scr3=%016RX64 %scr4=%016RX64\n"
2672 "%sdr0=%016RX64 %sdr1=%016RX64 %sdr2=%016RX64 %sdr3=%016RX64\n"
2673 "%sdr4=%016RX64 %sdr5=%016RX64 %sdr6=%016RX64 %sdr7=%016RX64\n"
2674 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
2675 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
2676 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
2677 "%sSysEnter={cs=%04llx eip=%016RX64 esp=%016RX64}\n"
2678 ,
2679 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
2680 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
2681 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
2682 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2683 pszPrefix, pCtxCore->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u,
2684 pszPrefix, pCtxCore->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u,
2685 pszPrefix, pCtxCore->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u,
2686 pszPrefix, pCtxCore->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u,
2687 pszPrefix, pCtxCore->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u,
2688 pszPrefix, pCtxCore->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u,
2689 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
2690 pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1], pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
2691 pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5], pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
2692 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
2693 pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
2694 pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2695 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
2696 else
2697 pHlp->pfnPrintf(pHlp,
2698 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
2699 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
2700 "%scs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr0=%08RX64 %sdr1=%08RX64\n"
2701 "%sds={%04x base=%016RX64 limit=%08x flags=%08x} %sdr2=%08RX64 %sdr3=%08RX64\n"
2702 "%ses={%04x base=%016RX64 limit=%08x flags=%08x} %sdr4=%08RX64 %sdr5=%08RX64\n"
2703 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr6=%08RX64 %sdr7=%08RX64\n"
2704 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x} %scr0=%08RX64 %scr2=%08RX64\n"
2705 "%sss={%04x base=%016RX64 limit=%08x flags=%08x} %scr3=%08RX64 %scr4=%08RX64\n"
2706 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
2707 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
2708 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
2709 "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
2710 ,
2711 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
2712 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2713 pszPrefix, pCtxCore->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u, pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1],
2714 pszPrefix, pCtxCore->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u, pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
2715 pszPrefix, pCtxCore->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u, pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5],
2716 pszPrefix, pCtxCore->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u, pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
2717 pszPrefix, pCtxCore->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u, pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2,
2718 pszPrefix, pCtxCore->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
2719 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
2720 pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
2721 pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2722 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
2723
2724 pHlp->pfnPrintf(pHlp,
2725 "%sFCW=%04x %sFSW=%04x %sFTW=%04x %sFOP=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n"
2726 "%sFPUIP=%08x %sCS=%04x %sRsrvd1=%04x %sFPUDP=%08x %sDS=%04x %sRsrvd2=%04x\n"
2727 ,
2728 pszPrefix, pCtx->fpu.FCW, pszPrefix, pCtx->fpu.FSW, pszPrefix, pCtx->fpu.FTW, pszPrefix, pCtx->fpu.FOP,
2729 pszPrefix, pCtx->fpu.MXCSR, pszPrefix, pCtx->fpu.MXCSR_MASK,
2730 pszPrefix, pCtx->fpu.FPUIP, pszPrefix, pCtx->fpu.CS, pszPrefix, pCtx->fpu.Rsrvd1,
2731 pszPrefix, pCtx->fpu.FPUDP, pszPrefix, pCtx->fpu.DS, pszPrefix, pCtx->fpu.Rsrvd2
2732 );
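/*
 * The FPU registers are stored relative to TOP (FSW bits 11..13), so map
 * the logical ST(i) indices back onto the physical FPR slots below; e.g.
 * with TOP=6, ST(0) lives in FPR6 and ST(2) wraps around into FPR0.
 */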
2733 unsigned iShift = (pCtx->fpu.FSW >> 11) & 7;
2734 for (unsigned iST = 0; iST < RT_ELEMENTS(pCtx->fpu.aRegs); iST++)
2735 {
2736 unsigned iFPR = (iST + iShift) % RT_ELEMENTS(pCtx->fpu.aRegs);
2737 unsigned uTag = pCtx->fpu.FTW & (1 << iFPR) ? 1 : 0;
2738 char chSign = pCtx->fpu.aRegs[iFPR].au16[4] & 0x8000 ? '-' : '+';
2739 unsigned iInteger = (unsigned)(pCtx->fpu.aRegs[iFPR].au64[0] >> 63);
2740 uint64_t u64Fraction = pCtx->fpu.aRegs[iFPR].au64[0] & UINT64_C(0x7fffffffffffffff);
2741 unsigned uExponent = pCtx->fpu.aRegs[iFPR].au16[4] & 0x7fff;
2742 /** @todo This isn't entirely correct and needs more work! */
2743 pHlp->pfnPrintf(pHlp,
2744 "%sST(%u)=%sFPR%u={%04RX16'%08RX32'%08RX32} t%d %c%u.%022llu ^ %u",
2745 pszPrefix, iST, pszPrefix, iFPR,
2746 pCtx->fpu.aRegs[iFPR].au16[4], pCtx->fpu.aRegs[iFPR].au32[1], pCtx->fpu.aRegs[iFPR].au32[0],
2747 uTag, chSign, iInteger, u64Fraction, uExponent);
2748 if (pCtx->fpu.aRegs[iFPR].au16[5] || pCtx->fpu.aRegs[iFPR].au16[6] || pCtx->fpu.aRegs[iFPR].au16[7])
2749 pHlp->pfnPrintf(pHlp, " res={%04RX16,%04RX16,%04RX16}\n",
2750 pCtx->fpu.aRegs[iFPR].au16[5], pCtx->fpu.aRegs[iFPR].au16[6], pCtx->fpu.aRegs[iFPR].au16[7]);
2751 else
2752 pHlp->pfnPrintf(pHlp, "\n");
2753 }
2754 for (unsigned iXMM = 0; iXMM < RT_ELEMENTS(pCtx->fpu.aXMM); iXMM++)
2755 pHlp->pfnPrintf(pHlp,
2756 iXMM & 1
2757 ? "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32\n"
2758 : "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32 ",
2759 pszPrefix, iXMM, iXMM < 10 ? " " : "",
2760 pCtx->fpu.aXMM[iXMM].au32[3],
2761 pCtx->fpu.aXMM[iXMM].au32[2],
2762 pCtx->fpu.aXMM[iXMM].au32[1],
2763 pCtx->fpu.aXMM[iXMM].au32[0]);
2764 for (unsigned i = 0; i < RT_ELEMENTS(pCtx->fpu.au32RsrvdRest); i++)
2765 if (pCtx->fpu.au32RsrvdRest[i])
2766 pHlp->pfnPrintf(pHlp, "%sRsrvdRest[%u]=%RX32 (offset=%#x)\n",
2767 pszPrefix, i, pCtx->fpu.au32RsrvdRest[i], RT_OFFSETOF(X86FXSTATE, au32RsrvdRest[i]) );
2768
2769 pHlp->pfnPrintf(pHlp,
2770 "%sEFER =%016RX64\n"
2771 "%sPAT =%016RX64\n"
2772 "%sSTAR =%016RX64\n"
2773 "%sCSTAR =%016RX64\n"
2774 "%sLSTAR =%016RX64\n"
2775 "%sSFMASK =%016RX64\n"
2776 "%sKERNELGSBASE =%016RX64\n",
2777 pszPrefix, pCtx->msrEFER,
2778 pszPrefix, pCtx->msrPAT,
2779 pszPrefix, pCtx->msrSTAR,
2780 pszPrefix, pCtx->msrCSTAR,
2781 pszPrefix, pCtx->msrLSTAR,
2782 pszPrefix, pCtx->msrSFMASK,
2783 pszPrefix, pCtx->msrKERNELGSBASE);
2784 break;
2785 }
2786}
2787
2788
2789/**
2790 * Display all cpu states and any other cpum info.
2791 *
2792 * @param pVM Pointer to the VM.
2793 * @param pHlp The info helper functions.
2794 * @param pszArgs Arguments, passed on to the individual info handlers.
2795 */
2796static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2797{
2798 cpumR3InfoGuest(pVM, pHlp, pszArgs);
2799 cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);
2800 cpumR3InfoHyper(pVM, pHlp, pszArgs);
2801 cpumR3InfoHost(pVM, pHlp, pszArgs);
2802}
2803
2804
2805/**
2806 * Parses the info argument.
2807 *
2808 * The argument starts with 'verbose', 'terse' or 'default' and then
2809 * continues with the comment string.
2810 *
2811 * @param pszArgs The pointer to the argument string.
2812 * @param penmType Where to store the dump type request.
2813 * @param ppszComment Where to store the pointer to the comment string.
2814 */
2815static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
2816{
2817 if (!pszArgs)
2818 {
2819 *penmType = CPUMDUMPTYPE_DEFAULT;
2820 *ppszComment = "";
2821 }
2822 else
2823 {
2824 if (!strncmp(pszArgs, "verbose", sizeof("verbose") - 1))
2825 {
2826 pszArgs += 7;
2827 *penmType = CPUMDUMPTYPE_VERBOSE;
2828 }
2829 else if (!strncmp(pszArgs, "terse", sizeof("terse") - 1))
2830 {
2831 pszArgs += 5;
2832 *penmType = CPUMDUMPTYPE_TERSE;
2833 }
2834 else if (!strncmp(pszArgs, "default", sizeof("default") - 1))
2835 {
2836 pszArgs += 7;
2837 *penmType = CPUMDUMPTYPE_DEFAULT;
2838 }
2839 else
2840 *penmType = CPUMDUMPTYPE_DEFAULT;
2841 *ppszComment = RTStrStripL(pszArgs);
2842 }
2843}
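/*
 * Illustrative only: the argument string "verbose after #PF" parses to
 * CPUMDUMPTYPE_VERBOSE with the comment "after #PF", while NULL or an
 * unrecognized keyword falls back to CPUMDUMPTYPE_DEFAULT.
 */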
2844
2845
2846/**
2847 * Display the guest cpu state.
2848 *
2849 * @param pVM Pointer to the VM.
2850 * @param pHlp The info helper functions.
2851 * @param pszArgs Arguments, see cpumR3InfoParseArg().
2852 */
2853static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2854{
2855 CPUMDUMPTYPE enmType;
2856 const char *pszComment;
2857 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
2858
2859 /** @todo SMP support! */
2860 PVMCPU pVCpu = VMMGetCpu(pVM);
2861 if (!pVCpu)
2862 pVCpu = &pVM->aCpus[0];
2863
2864 pHlp->pfnPrintf(pHlp, "Guest CPUM (VCPU %d) state: %s\n", pVCpu->idCpu, pszComment);
2865
2866 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2867 cpumR3InfoOne(pVM, pCtx, CPUMCTX2CORE(pCtx), pHlp, enmType, "");
2868}
2869
2870
2871/**
2872 * Display the current guest instruction.
2873 *
2874 * @param pVM Pointer to the VM.
2875 * @param pHlp The info helper functions.
2876 * @param pszArgs Arguments, ignored.
2877 */
2878static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2879{
2880 NOREF(pszArgs);
2881
2882 /** @todo SMP support! */
2883 PVMCPU pVCpu = VMMGetCpu(pVM);
2884 if (!pVCpu)
2885 pVCpu = &pVM->aCpus[0];
2886
2887 char szInstruction[256];
2888 int rc = DBGFR3DisasInstrCurrent(pVCpu, szInstruction, sizeof(szInstruction));
2889 if (RT_SUCCESS(rc))
2890 pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
2891}
2892
2893
2894/**
2895 * Display the hypervisor cpu state.
2896 *
2897 * @param pVM Pointer to the VM.
2898 * @param pHlp The info helper functions.
2899 * @param pszArgs Arguments, see cpumR3InfoParseArg().
2900 */
2901static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2902{
2903 CPUMDUMPTYPE enmType;
2904 const char *pszComment;
2905 /** @todo SMP */
2906 PVMCPU pVCpu = &pVM->aCpus[0];
2907
2908 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
2909 pHlp->pfnPrintf(pHlp, "Hypervisor CPUM state: %s\n", pszComment);
2910 cpumR3InfoOne(pVM, &pVCpu->cpum.s.Hyper, CPUMCTX2CORE(&pVCpu->cpum.s.Hyper), pHlp, enmType, ".");
2911 pHlp->pfnPrintf(pHlp, "CR4OrMask=%#x CR4AndMask=%#x\n", pVM->cpum.s.CR4.OrMask, pVM->cpum.s.CR4.AndMask);
2912}
2913
2914
2915/**
2916 * Display the host cpu state.
2917 *
2918 * @param pVM Pointer to the VM.
2919 * @param pHlp The info helper functions.
2920 * @param pszArgs Arguments, see cpumR3InfoParseArg().
2921 */
2922static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2923{
2924 CPUMDUMPTYPE enmType;
2925 const char *pszComment;
2926 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
2927 pHlp->pfnPrintf(pHlp, "Host CPUM state: %s\n", pszComment);
2928
2929 /*
2930 * Format the EFLAGS.
2931 */
2932 /** @todo SMP */
2933 PCPUMHOSTCTX pCtx = &pVM->aCpus[0].cpum.s.Host;
2934#if HC_ARCH_BITS == 32
2935 uint32_t efl = pCtx->eflags.u32;
2936#else
2937 uint64_t efl = pCtx->rflags;
2938#endif
2939 char szEFlags[80];
2940 cpumR3InfoFormatFlags(&szEFlags[0], efl);
2941
2942 /*
2943 * Format the registers.
2944 */
2945#if HC_ARCH_BITS == 32
2946# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2947 if (!(pCtx->efer & MSR_K6_EFER_LMA))
2948# endif
2949 {
2950 pHlp->pfnPrintf(pHlp,
2951 "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
2952 "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
2953 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
2954 "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
2955 "dr[0]=%08RX64 dr[1]=%08RX64 dr[2]=%08RX64 dr[3]=%08RX64 dr[6]=%08RX64 dr[7]=%08RX64\n"
2956 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
2957 ,
2958 /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
2959 /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
2960 pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
2961 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
2962 pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
2963 (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->ldtr,
2964 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
2965 }
2966# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2967 else
2968# endif
2969#endif
2970#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2971 {
2972 pHlp->pfnPrintf(pHlp,
2973 "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
2974 "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
2975 "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
2976 " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
2977 "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
2978 "r14=%016RX64 r15=%016RX64\n"
2979 "iopl=%d %31s\n"
2980 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
2981 "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
2982 "cr4=%016RX64 ldtr=%04x tr=%04x\n"
2983 "dr[0]=%016RX64 dr[1]=%016RX64 dr[2]=%016RX64\n"
2984 "dr[3]=%016RX64 dr[6]=%016RX64 dr[7]=%016RX64\n"
2985 "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
2986 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
2987 "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
2988 ,
2989 /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
2990 pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
2991 /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
2992 /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
2993 pCtx->r11, pCtx->r12, pCtx->r13,
2994 pCtx->r14, pCtx->r15,
2995 X86_EFL_GET_IOPL(efl), szEFlags,
2996 pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
2997 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
2998 pCtx->cr4, pCtx->ldtr, pCtx->tr,
2999 pCtx->dr0, pCtx->dr1, pCtx->dr2,
3000 pCtx->dr3, pCtx->dr6, pCtx->dr7,
3001 pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
3002 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
3003 pCtx->FSbase, pCtx->GSbase, pCtx->efer);
3004 }
3005#endif
3006}
3007
3008
3009/**
3010 * Get L1 cache / TLB associativity.
3011 */
3012static const char *getCacheAss(unsigned u, char *pszBuf)
3013{
3014 if (u == 0)
3015 return "res0 ";
3016 if (u == 1)
3017 return "direct";
3018 if (u == 255)
3019 return "fully";
3020 if (u >= 256)
3021 return "???";
3022
3023 RTStrPrintf(pszBuf, 16, "%d way", u);
3024 return pszBuf;
3025}
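/*
 * Illustrative decode: the AMD CPUID leaves below encode associativity as
 * a byte, so 0x01 comes back as "direct", 0x04 as "4 way" and 0xff as
 * "fully".
 */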
3026
3027
3028/**
3029 * Get L2 cache associativity.
3030 */
3031 static const char *getL2CacheAss(unsigned u)
3032{
3033 switch (u)
3034 {
3035 case 0: return "off ";
3036 case 1: return "direct";
3037 case 2: return "2 way ";
3038 case 3: return "res3 ";
3039 case 4: return "4 way ";
3040 case 5: return "res5 ";
3041 case 6: return "8 way ";
3042 case 7: return "res7 ";
3043 case 8: return "16 way";
3044 case 9: return "res9 ";
3045 case 10: return "res10 ";
3046 case 11: return "res11 ";
3047 case 12: return "res12 ";
3048 case 13: return "res13 ";
3049 case 14: return "res14 ";
3050 case 15: return "fully ";
3051 default: return "????";
3052 }
3053}
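/*
 * Illustrative decode: the L2 fields use a 4-bit code, so 0x6 comes back
 * as "8 way" and 0xf as "fully"; most of the remaining codes are reserved.
 */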
3054
3055
3056/**
3057 * Display the guest CpuId leaves.
3058 *
3059 * @param pVM Pointer to the VM.
3060 * @param pHlp The info helper functions.
3061 * @param pszArgs "terse", "default" or "verbose".
3062 */
3063static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3064{
3065 /*
3066 * Parse the argument.
3067 */
3068 unsigned iVerbosity = 1;
3069 if (pszArgs)
3070 {
3071 pszArgs = RTStrStripL(pszArgs);
3072 if (!strcmp(pszArgs, "terse"))
3073 iVerbosity--;
3074 else if (!strcmp(pszArgs, "verbose"))
3075 iVerbosity++;
3076 }
3077
3078 /*
3079 * Start cracking.
3080 */
3081 CPUMCPUID Host;
3082 CPUMCPUID Guest;
3083 unsigned cStdMax = pVM->cpum.s.aGuestCpuIdStd[0].eax;
3084
3085 pHlp->pfnPrintf(pHlp,
3086 " RAW Standard CPUIDs\n"
3087 " Function eax ebx ecx edx\n");
3088 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd); i++)
3089 {
3090 Guest = pVM->cpum.s.aGuestCpuIdStd[i];
3091 ASMCpuId_Idx_ECX(i, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3092
3093 pHlp->pfnPrintf(pHlp,
3094 "Gst: %08x %08x %08x %08x %08x%s\n"
3095 "Hst:          %08x %08x %08x %08x\n",
3096 i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
3097 i <= cStdMax ? "" : "*",
3098 Host.eax, Host.ebx, Host.ecx, Host.edx);
3099 }
3100
3101 /*
3102 * If verbose, decode it.
3103 */
3104 if (iVerbosity)
3105 {
3106 Guest = pVM->cpum.s.aGuestCpuIdStd[0];
3107 pHlp->pfnPrintf(pHlp,
3108 "Name: %.04s%.04s%.04s\n"
3109 "Supports: 0-%x\n",
3110 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
3111 }
3112
3113 /*
3114 * Get Features.
3115 */
3116 bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdStd[0].ebx,
3117 pVM->cpum.s.aGuestCpuIdStd[0].ecx,
3118 pVM->cpum.s.aGuestCpuIdStd[0].edx);
3119 if (cStdMax >= 1 && iVerbosity)
3120 {
3121 static const char * const s_apszTypes[4] = { "primary", "overdrive", "MP", "reserved" };
3122
3123 Guest = pVM->cpum.s.aGuestCpuIdStd[1];
3124 uint32_t uEAX = Guest.eax;
3125
3126 pHlp->pfnPrintf(pHlp,
3127 "Family: %d \tExtended: %d \tEffective: %d\n"
3128 "Model: %d \tExtended: %d \tEffective: %d\n"
3129 "Stepping: %d\n"
3130 "Type: %d (%s)\n"
3131 "APIC ID: %#04x\n"
3132 "Logical CPUs: %d\n"
3133 "CLFLUSH Size: %d\n"
3134 "Brand ID: %#04x\n",
3135 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
3136 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
3137 ASMGetCpuStepping(uEAX),
3138 (uEAX >> 12) & 3, s_apszTypes[(uEAX >> 12) & 3],
3139 (Guest.ebx >> 24) & 0xff,
3140 (Guest.ebx >> 16) & 0xff,
3141 (Guest.ebx >> 8) & 0xff,
3142 (Guest.ebx >> 0) & 0xff);
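/*
 * Illustrative decode (made-up leaf value): EAX=0x000206a7 yields
 * family 6 (extended 0, effective 6), model 0xa (extended 2, effective
 * 0x2a) and stepping 7 with the shifts used above.
 */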
3143 if (iVerbosity == 1)
3144 {
3145 uint32_t uEDX = Guest.edx;
3146 pHlp->pfnPrintf(pHlp, "Features EDX: ");
3147 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
3148 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
3149 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
3150 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
3151 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
3152 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
3153 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
3154 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
3155 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
3156 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
3157 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
3158 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SEP");
3159 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
3160 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
3161 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
3162 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
3163 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
3164 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
3165 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " PSN");
3166 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " CLFSH");
3167 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " 20");
3168 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " DS");
3169 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ACPI");
3170 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
3171 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
3172 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " SSE");
3173 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " SSE2");
3174 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " SS");
3175 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " HTT");
3176 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " TM");
3177 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " 30");
3178 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " PBE");
3179 pHlp->pfnPrintf(pHlp, "\n");
3180
3181 uint32_t uECX = Guest.ecx;
3182 pHlp->pfnPrintf(pHlp, "Features ECX: ");
3183 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " SSE3");
3184 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " PCLMUL");
3185 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DTES64");
3186 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " MONITOR");
3187 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " DS-CPL");
3188 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " VMX");
3189 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " SMX");
3190 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " EST");
3191 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " TM2");
3192 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " SSSE3");
3193 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " CNXT-ID");
3194 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " 11");
3195 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " FMA");
3196 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " CX16");
3197 if (uECX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " TPRUPDATE");
3198 if (uECX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " PDCM");
3199 if (uECX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " 16");
3200 if (uECX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PCID");
3201 if (uECX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " DCA");
3202 if (uECX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " SSE4.1");
3203 if (uECX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " SSE4.2");
3204 if (uECX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " X2APIC");
3205 if (uECX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " MOVBE");
3206 if (uECX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " POPCNT");
3207 if (uECX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " TSCDEADL");
3208 if (uECX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " AES");
3209 if (uECX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " XSAVE");
3210 if (uECX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " OSXSAVE");
3211 if (uECX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " AVX");
3212 if (uECX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " 29");
3213 if (uECX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " 30");
3214 if (uECX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " 31");
3215 pHlp->pfnPrintf(pHlp, "\n");
3216 }
3217 else
3218 {
3219 ASMCpuId(1, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3220
3221 X86CPUIDFEATEDX EdxHost = *(PX86CPUIDFEATEDX)&Host.edx;
3222 X86CPUIDFEATECX EcxHost = *(PX86CPUIDFEATECX)&Host.ecx;
3223 X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.edx;
3224 X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.ecx;
3225
3226 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
3227 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", EdxGuest.u1FPU, EdxHost.u1FPU);
3228 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", EdxGuest.u1VME, EdxHost.u1VME);
3229 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", EdxGuest.u1DE, EdxHost.u1DE);
3230 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", EdxGuest.u1PSE, EdxHost.u1PSE);
3231 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", EdxGuest.u1TSC, EdxHost.u1TSC);
3232 pHlp->pfnPrintf(pHlp, "MSR - Model Specific Registers = %d (%d)\n", EdxGuest.u1MSR, EdxHost.u1MSR);
3233 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", EdxGuest.u1PAE, EdxHost.u1PAE);
3234 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", EdxGuest.u1MCE, EdxHost.u1MCE);
3235 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", EdxGuest.u1CX8, EdxHost.u1CX8);
3236 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", EdxGuest.u1APIC, EdxHost.u1APIC);
3237 pHlp->pfnPrintf(pHlp, "10 - Reserved = %d (%d)\n", EdxGuest.u1Reserved1, EdxHost.u1Reserved1);
3238 pHlp->pfnPrintf(pHlp, "SEP - SYSENTER and SYSEXIT = %d (%d)\n", EdxGuest.u1SEP, EdxHost.u1SEP);
3239 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", EdxGuest.u1MTRR, EdxHost.u1MTRR);
3240 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", EdxGuest.u1PGE, EdxHost.u1PGE);
3241 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", EdxGuest.u1MCA, EdxHost.u1MCA);
3242 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", EdxGuest.u1CMOV, EdxHost.u1CMOV);
3243 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", EdxGuest.u1PAT, EdxHost.u1PAT);
3244 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extension = %d (%d)\n", EdxGuest.u1PSE36, EdxHost.u1PSE36);
3245 pHlp->pfnPrintf(pHlp, "PSN - Processor Serial Number = %d (%d)\n", EdxGuest.u1PSN, EdxHost.u1PSN);
3246 pHlp->pfnPrintf(pHlp, "CLFSH - CLFLUSH Instruction. = %d (%d)\n", EdxGuest.u1CLFSH, EdxHost.u1CLFSH);
3247 pHlp->pfnPrintf(pHlp, "20 - Reserved = %d (%d)\n", EdxGuest.u1Reserved2, EdxHost.u1Reserved2);
3248 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", EdxGuest.u1DS, EdxHost.u1DS);
3249 pHlp->pfnPrintf(pHlp, "ACPI - Thermal Mon. & Soft. Clock Ctrl.= %d (%d)\n", EdxGuest.u1ACPI, EdxHost.u1ACPI);
3250 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", EdxGuest.u1MMX, EdxHost.u1MMX);
3251 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", EdxGuest.u1FXSR, EdxHost.u1FXSR);
3252 pHlp->pfnPrintf(pHlp, "SSE - SSE Support = %d (%d)\n", EdxGuest.u1SSE, EdxHost.u1SSE);
3253 pHlp->pfnPrintf(pHlp, "SSE2 - SSE2 Support = %d (%d)\n", EdxGuest.u1SSE2, EdxHost.u1SSE2);
3254 pHlp->pfnPrintf(pHlp, "SS - Self Snoop = %d (%d)\n", EdxGuest.u1SS, EdxHost.u1SS);
3255 pHlp->pfnPrintf(pHlp, "HTT - Hyper-Threading Technology = %d (%d)\n", EdxGuest.u1HTT, EdxHost.u1HTT);
3256 pHlp->pfnPrintf(pHlp, "TM - Thermal Monitor = %d (%d)\n", EdxGuest.u1TM, EdxHost.u1TM);
3257 pHlp->pfnPrintf(pHlp, "30 - Reserved = %d (%d)\n", EdxGuest.u1Reserved3, EdxHost.u1Reserved3);
3258 pHlp->pfnPrintf(pHlp, "PBE - Pending Break Enable = %d (%d)\n", EdxGuest.u1PBE, EdxHost.u1PBE);
3259
3260 pHlp->pfnPrintf(pHlp, "Supports SSE3 = %d (%d)\n", EcxGuest.u1SSE3, EcxHost.u1SSE3);
3261 pHlp->pfnPrintf(pHlp, "PCLMULQDQ = %d (%d)\n", EcxGuest.u1PCLMULQDQ, EcxHost.u1PCLMULQDQ);
3262 pHlp->pfnPrintf(pHlp, "DS Area 64-bit layout = %d (%d)\n", EcxGuest.u1DTE64, EcxHost.u1DTE64);
3263 pHlp->pfnPrintf(pHlp, "Supports MONITOR/MWAIT = %d (%d)\n", EcxGuest.u1Monitor, EcxHost.u1Monitor);
3264 pHlp->pfnPrintf(pHlp, "CPL-DS - CPL Qualified Debug Store = %d (%d)\n", EcxGuest.u1CPLDS, EcxHost.u1CPLDS);
3265 pHlp->pfnPrintf(pHlp, "VMX - Virtual Machine Technology = %d (%d)\n", EcxGuest.u1VMX, EcxHost.u1VMX);
3266 pHlp->pfnPrintf(pHlp, "SMX - Safer Mode Extensions = %d (%d)\n", EcxGuest.u1SMX, EcxHost.u1SMX);
3267 pHlp->pfnPrintf(pHlp, "Enhanced SpeedStep Technology = %d (%d)\n", EcxGuest.u1EST, EcxHost.u1EST);
3268 pHlp->pfnPrintf(pHlp, "Thermal Monitor 2 = %d (%d)\n", EcxGuest.u1TM2, EcxHost.u1TM2);
3269 pHlp->pfnPrintf(pHlp, "Supplemental SSE3 instructions = %d (%d)\n", EcxGuest.u1SSSE3, EcxHost.u1SSSE3);
3270 pHlp->pfnPrintf(pHlp, "L1 Context ID = %d (%d)\n", EcxGuest.u1CNTXID, EcxHost.u1CNTXID);
3271 pHlp->pfnPrintf(pHlp, "11 - Reserved = %d (%d)\n", EcxGuest.u1Reserved1, EcxHost.u1Reserved1);
3272 pHlp->pfnPrintf(pHlp, "FMA extensions using YMM state = %d (%d)\n", EcxGuest.u1FMA, EcxHost.u1FMA);
3273 pHlp->pfnPrintf(pHlp, "CMPXCHG16B instruction = %d (%d)\n", EcxGuest.u1CX16, EcxHost.u1CX16);
3274 pHlp->pfnPrintf(pHlp, "xTPR Update Control = %d (%d)\n", EcxGuest.u1TPRUpdate, EcxHost.u1TPRUpdate);
3275 pHlp->pfnPrintf(pHlp, "Perf/Debug Capability MSR = %d (%d)\n", EcxGuest.u1PDCM, EcxHost.u1PDCM);
3276 pHlp->pfnPrintf(pHlp, "16 - Reserved = %d (%d)\n", EcxGuest.u1Reserved2, EcxHost.u1Reserved2);
3277 pHlp->pfnPrintf(pHlp, "PCID - Process-context identifiers = %d (%d)\n", EcxGuest.u1PCID, EcxHost.u1PCID);
3278 pHlp->pfnPrintf(pHlp, "DCA - Direct Cache Access = %d (%d)\n", EcxGuest.u1DCA, EcxHost.u1DCA);
3279 pHlp->pfnPrintf(pHlp, "SSE4.1 instruction extensions = %d (%d)\n", EcxGuest.u1SSE4_1, EcxHost.u1SSE4_1);
3280 pHlp->pfnPrintf(pHlp, "SSE4.2 instruction extensions = %d (%d)\n", EcxGuest.u1SSE4_2, EcxHost.u1SSE4_2);
3281 pHlp->pfnPrintf(pHlp, "Supports the x2APIC extensions = %d (%d)\n", EcxGuest.u1x2APIC, EcxHost.u1x2APIC);
3282 pHlp->pfnPrintf(pHlp, "MOVBE instruction = %d (%d)\n", EcxGuest.u1MOVBE, EcxHost.u1MOVBE);
3283 pHlp->pfnPrintf(pHlp, "POPCNT instruction = %d (%d)\n", EcxGuest.u1POPCNT, EcxHost.u1POPCNT);
3284 pHlp->pfnPrintf(pHlp, "TSC-Deadline LAPIC timer mode = %d (%d)\n", EcxGuest.u1TSCDEADLINE, EcxHost.u1TSCDEADLINE);
3285 pHlp->pfnPrintf(pHlp, "AESNI instruction extensions = %d (%d)\n", EcxGuest.u1AES, EcxHost.u1AES);
3286 pHlp->pfnPrintf(pHlp, "XSAVE/XRSTOR extended state feature = %d (%d)\n", EcxGuest.u1XSAVE, EcxHost.u1XSAVE);
3287 pHlp->pfnPrintf(pHlp, "Supports OSXSAVE = %d (%d)\n", EcxGuest.u1OSXSAVE, EcxHost.u1OSXSAVE);
3288 pHlp->pfnPrintf(pHlp, "AVX instruction extensions = %d (%d)\n", EcxGuest.u1AVX, EcxHost.u1AVX);
3289 pHlp->pfnPrintf(pHlp, "29/30 - Reserved = %#x (%#x)\n",EcxGuest.u2Reserved3, EcxHost.u2Reserved3);
3290 pHlp->pfnPrintf(pHlp, "Hypervisor Present (we're a guest) = %d (%d)\n", EcxGuest.u1HVP, EcxHost.u1HVP);
3291 }
3292 }
3293 if (cStdMax >= 2 && iVerbosity)
3294 {
3295 /** @todo */
3296 }
3297
3298 /*
3299 * Extended.
3300 * Implemented after AMD specs.
3301 */
3302 unsigned cExtMax = pVM->cpum.s.aGuestCpuIdExt[0].eax & 0xffff;
3303
3304 pHlp->pfnPrintf(pHlp,
3305 "\n"
3306 " RAW Extended CPUIDs\n"
3307 " Function eax ebx ecx edx\n");
3308 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt); i++)
3309 {
3310 Guest = pVM->cpum.s.aGuestCpuIdExt[i];
3311 ASMCpuId(0x80000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3312
3313 pHlp->pfnPrintf(pHlp,
3314 "Gst: %08x %08x %08x %08x %08x%s\n"
3315 "Hst:          %08x %08x %08x %08x\n",
3316 0x80000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
3317 i <= cExtMax ? "" : "*",
3318 Host.eax, Host.ebx, Host.ecx, Host.edx);
3319 }
3320
3321 /*
3322 * Understandable output
3323 */
3324 if (iVerbosity)
3325 {
3326 Guest = pVM->cpum.s.aGuestCpuIdExt[0];
3327 pHlp->pfnPrintf(pHlp,
3328 "Ext Name: %.4s%.4s%.4s\n"
3329 "Ext Supports: 0x80000000-%#010x\n",
3330 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
3331 }
3332
3333 if (iVerbosity && cExtMax >= 1)
3334 {
3335 Guest = pVM->cpum.s.aGuestCpuIdExt[1];
3336 uint32_t uEAX = Guest.eax;
3337 pHlp->pfnPrintf(pHlp,
3338 "Family: %d \tExtended: %d \tEffective: %d\n"
3339 "Model: %d \tExtended: %d \tEffective: %d\n"
3340 "Stepping: %d\n"
3341 "Brand ID: %#05x\n",
3342 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
3343 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
3344 ASMGetCpuStepping(uEAX),
3345 Guest.ebx & 0xfff);
3346
3347 if (iVerbosity == 1)
3348 {
3349 uint32_t uEDX = Guest.edx;
3350 pHlp->pfnPrintf(pHlp, "Features EDX: ");
3351 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
3352 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
3353 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
3354 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
3355 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
3356 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
3357 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
3358 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
3359 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
3360 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
3361 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
3362 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SCR");
3363 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
3364 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
3365 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
3366 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
3367 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
3368 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
3369 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " 18");
3370 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " 19");
3371 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " NX");
3372 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " 21");
3373 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ExtMMX");
3374 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
3375 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
3376 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " FastFXSR");
3377 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " Page1GB");
3378 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " RDTSCP");
3379 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " 28");
3380 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " LongMode");
3381 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " Ext3DNow");
3382 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " 3DNow");
3383 pHlp->pfnPrintf(pHlp, "\n");
3384
3385 uint32_t uECX = Guest.ecx;
3386 pHlp->pfnPrintf(pHlp, "Features ECX: ");
3387 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " LAHF/SAHF");
3388 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " CMPL");
3389 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " SVM");
3390 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " ExtAPIC");
3391 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " CR8L");
3392 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " ABM");
3393 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " SSE4A");
3394 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MISALNSSE");
3395 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " 3DNOWPRF");
3396 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " OSVW");
3397 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " IBS");
3398 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SSE5");
3399 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " SKINIT");
3400 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " WDT");
3401 for (unsigned iBit = 14; iBit < 32; iBit++)
3402 if (uECX & RT_BIT(iBit))
3403 pHlp->pfnPrintf(pHlp, " %d", iBit);
3404 pHlp->pfnPrintf(pHlp, "\n");
3405 }
3406 else
3407 {
3408 ASMCpuId(0x80000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3409
3410 uint32_t uEdxGst = Guest.edx;
3411 uint32_t uEdxHst = Host.edx;
3412 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
3413 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
3414 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
3415 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
3416 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
3417 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
3418 pHlp->pfnPrintf(pHlp, "MSR - K86 Model Specific Registers = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
3419 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
3420 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
3421 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
3422 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
3423 pHlp->pfnPrintf(pHlp, "10 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
3424 pHlp->pfnPrintf(pHlp, "SEP - SYSCALL and SYSRET = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
3425 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
3426 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
3427 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
3428 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
3429 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
3430 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extension = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
3431 pHlp->pfnPrintf(pHlp, "18 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
3432 pHlp->pfnPrintf(pHlp, "19 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
3433 pHlp->pfnPrintf(pHlp, "NX - No-Execute Page Protection = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
3434 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
3435 pHlp->pfnPrintf(pHlp, "AXMMX - AMD Extensions to MMX Instr. = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
3436 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
3437 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
3438 pHlp->pfnPrintf(pHlp, "25 - AMD fast FXSAVE and FXRSTOR Instr.= %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
3439 pHlp->pfnPrintf(pHlp, "26 - 1 GB large page support = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
3440 pHlp->pfnPrintf(pHlp, "27 - RDTSCP instruction = %d (%d)\n", !!(uEdxGst & RT_BIT(27)), !!(uEdxHst & RT_BIT(27)));
3441 pHlp->pfnPrintf(pHlp, "28 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(28)), !!(uEdxHst & RT_BIT(28)));
3442 pHlp->pfnPrintf(pHlp, "29 - AMD Long Mode = %d (%d)\n", !!(uEdxGst & RT_BIT(29)), !!(uEdxHst & RT_BIT(29)));
3443 pHlp->pfnPrintf(pHlp, "30 - AMD Extensions to 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(30)), !!(uEdxHst & RT_BIT(30)));
3444 pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(31)), !!(uEdxHst & RT_BIT(31)));
3445
3446 uint32_t uEcxGst = Guest.ecx;
3447 uint32_t uEcxHst = Host.ecx;
3448 pHlp->pfnPrintf(pHlp, "LahfSahf - LAHF/SAHF in 64-bit mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 0)), !!(uEcxHst & RT_BIT( 0)));
3449 pHlp->pfnPrintf(pHlp, "CmpLegacy - Core MP legacy mode (depr) = %d (%d)\n", !!(uEcxGst & RT_BIT( 1)), !!(uEcxHst & RT_BIT( 1)));
3450 pHlp->pfnPrintf(pHlp, "SVM - AMD VM Extensions = %d (%d)\n", !!(uEcxGst & RT_BIT( 2)), !!(uEcxHst & RT_BIT( 2)));
3451 pHlp->pfnPrintf(pHlp, "APIC registers starting at 0x400 = %d (%d)\n", !!(uEcxGst & RT_BIT( 3)), !!(uEcxHst & RT_BIT( 3)));
3452 pHlp->pfnPrintf(pHlp, "AltMovCR8 - LOCK MOV CR0 means MOV CR8 = %d (%d)\n", !!(uEcxGst & RT_BIT( 4)), !!(uEcxHst & RT_BIT( 4)));
3453 pHlp->pfnPrintf(pHlp, "Advanced bit manipulation = %d (%d)\n", !!(uEcxGst & RT_BIT( 5)), !!(uEcxHst & RT_BIT( 5)));
3454 pHlp->pfnPrintf(pHlp, "SSE4A instruction support = %d (%d)\n", !!(uEcxGst & RT_BIT( 6)), !!(uEcxHst & RT_BIT( 6)));
3455 pHlp->pfnPrintf(pHlp, "Misaligned SSE mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 7)), !!(uEcxHst & RT_BIT( 7)));
3456 pHlp->pfnPrintf(pHlp, "PREFETCH and PREFETCHW instruction = %d (%d)\n", !!(uEcxGst & RT_BIT( 8)), !!(uEcxHst & RT_BIT( 8)));
3457 pHlp->pfnPrintf(pHlp, "OS visible workaround = %d (%d)\n", !!(uEcxGst & RT_BIT( 9)), !!(uEcxHst & RT_BIT( 9)));
3458 pHlp->pfnPrintf(pHlp, "Instruction based sampling = %d (%d)\n", !!(uEcxGst & RT_BIT(10)), !!(uEcxHst & RT_BIT(10)));
3459 pHlp->pfnPrintf(pHlp, "SSE5 support = %d (%d)\n", !!(uEcxGst & RT_BIT(11)), !!(uEcxHst & RT_BIT(11)));
3460 pHlp->pfnPrintf(pHlp, "SKINIT, STGI, and DEV support = %d (%d)\n", !!(uEcxGst & RT_BIT(12)), !!(uEcxHst & RT_BIT(12)));
3461 pHlp->pfnPrintf(pHlp, "Watchdog timer support. = %d (%d)\n", !!(uEcxGst & RT_BIT(13)), !!(uEcxHst & RT_BIT(13)));
3462 pHlp->pfnPrintf(pHlp, "31:14 - Reserved = %#x (%#x)\n", uEcxGst >> 14, uEcxHst >> 14);
3463 }
3464 }
3465
3466 if (iVerbosity && cExtMax >= 2)
3467 {
3468 char szString[4*4*3+1] = {0};
3469 uint32_t *pu32 = (uint32_t *)szString;
3470 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].eax;
3471 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ebx;
3472 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ecx;
3473 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].edx;
3474 if (cExtMax >= 3)
3475 {
3476 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].eax;
3477 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ebx;
3478 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ecx;
3479 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].edx;
3480 }
3481 if (cExtMax >= 4)
3482 {
3483 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].eax;
3484 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ebx;
3485 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ecx;
3486 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].edx;
3487 }
3488 pHlp->pfnPrintf(pHlp, "Full Name: %s\n", szString);
3489 }
3490
3491 if (iVerbosity && cExtMax >= 5)
3492 {
3493 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[5].eax;
3494 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[5].ebx;
3495 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[5].ecx;
3496 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[5].edx;
3497 char sz1[32];
3498 char sz2[32];
3499
3500 pHlp->pfnPrintf(pHlp,
3501 "TLB 2/4M Instr/Uni: %s %3d entries\n"
3502 "TLB 2/4M Data: %s %3d entries\n",
3503 getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
3504 getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
3505 pHlp->pfnPrintf(pHlp,
3506 "TLB 4K Instr/Uni: %s %3d entries\n"
3507 "TLB 4K Data: %s %3d entries\n",
3508 getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
3509 getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
3510 pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
3511 "L1 Instr Cache Lines Per Tag: %d\n"
3512 "L1 Instr Cache Associativity: %s\n"
3513 "L1 Instr Cache Size: %d KB\n",
3514 (uEDX >> 0) & 0xff,
3515 (uEDX >> 8) & 0xff,
3516 getCacheAss((uEDX >> 16) & 0xff, sz1),
3517 (uEDX >> 24) & 0xff);
3518 pHlp->pfnPrintf(pHlp,
3519 "L1 Data Cache Line Size: %d bytes\n"
3520 "L1 Data Cache Lines Per Tag: %d\n"
3521 "L1 Data Cache Associativity: %s\n"
3522 "L1 Data Cache Size: %d KB\n",
3523 (uECX >> 0) & 0xff,
3524 (uECX >> 8) & 0xff,
3525 getCacheAss((uECX >> 16) & 0xff, sz1),
3526 (uECX >> 24) & 0xff);
3527 }
3528
3529 if (iVerbosity && cExtMax >= 6)
3530 {
3531 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[6].eax;
3532 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[6].ebx;
3533 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[6].edx;
3534
3535 pHlp->pfnPrintf(pHlp,
3536 "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
3537 "L2 TLB 2/4M Data: %s %4d entries\n",
3538 getL2CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
3539 getL2CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
3540 pHlp->pfnPrintf(pHlp,
3541 "L2 TLB 4K Instr/Uni: %s %4d entries\n"
3542 "L2 TLB 4K Data: %s %4d entries\n",
3543 getL2CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
3544 getL2CacheAss((uEBX >> 28) & 0xf), (uEBX >> 16) & 0xfff);
3545 pHlp->pfnPrintf(pHlp,
3546 "L2 Cache Line Size: %d bytes\n"
3547 "L2 Cache Lines Per Tag: %d\n"
3548 "L2 Cache Associativity: %s\n"
3549 "L2 Cache Size: %d KB\n",
3550 (uEDX >> 0) & 0xff,
3551 (uEDX >> 8) & 0xf,
3552 getL2CacheAss((uEDX >> 12) & 0xf),
3553 (uEDX >> 16) & 0xffff);
3554 }
3555
3556 if (iVerbosity && cExtMax >= 7)
3557 {
3558 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[7].edx;
3559
3560 pHlp->pfnPrintf(pHlp, "APM Features: ");
3561 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " TS");
3562 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " FID");
3563 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " VID");
3564 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " TTP");
3565 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TM");
3566 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " STC");
3567 for (unsigned iBit = 6; iBit < 32; iBit++)
3568 if (uEDX & RT_BIT(iBit))
3569 pHlp->pfnPrintf(pHlp, " %d", iBit);
3570 pHlp->pfnPrintf(pHlp, "\n");
3571 }
3572
3573 if (iVerbosity && cExtMax >= 8)
3574 {
3575 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[8].eax;
3576 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[8].ecx;
3577
3578 pHlp->pfnPrintf(pHlp,
3579 "Physical Address Width: %d bits\n"
3580 "Virtual Address Width: %d bits\n"
3581 "Guest Physical Address Width: %d bits\n",
3582 (uEAX >> 0) & 0xff,
3583 (uEAX >> 8) & 0xff,
3584 (uEAX >> 16) & 0xff);
3585 pHlp->pfnPrintf(pHlp,
3586 "Physical Core Count: %d\n",
3587 (uECX >> 0) & 0xff);
3588 }
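/*
 * Illustrative decode (made-up leaf value): 0x80000008 EAX=0x00003028
 * yields 40 physical and 48 virtual address bits; a guest physical width
 * of 0 means it equals the physical width.
 */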
3589
3590
3591 /*
3592 * Centaur.
3593 */
3594 unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdCentaur[0].eax & 0xffff;
3595
3596 pHlp->pfnPrintf(pHlp,
3597 "\n"
3598 " RAW Centaur CPUIDs\n"
3599 " Function eax ebx ecx edx\n");
3600 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur); i++)
3601 {
3602 Guest = pVM->cpum.s.aGuestCpuIdCentaur[i];
3603 ASMCpuId(0xc0000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3604
3605 pHlp->pfnPrintf(pHlp,
3606 "Gst: %08x %08x %08x %08x %08x%s\n"
3607 "Hst:          %08x %08x %08x %08x\n",
3608 0xc0000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
3609 i <= cCentaurMax ? "" : "*",
3610 Host.eax, Host.ebx, Host.ecx, Host.edx);
3611 }
3612
3613 /*
3614 * Understandable output
3615 */
3616 if (iVerbosity)
3617 {
3618 Guest = pVM->cpum.s.aGuestCpuIdCentaur[0];
3619 pHlp->pfnPrintf(pHlp,
3620 "Centaur Supports: 0xc0000000-%#010x\n",
3621 Guest.eax);
3622 }
3623
3624 if (iVerbosity && cCentaurMax >= 1)
3625 {
3626 ASMCpuId(0xc0000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3627 uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdCentaur[1].edx;
3628 uint32_t uEdxHst = Host.edx;
3629
3630 if (iVerbosity == 1)
3631 {
3632 pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
3633 if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
3634 if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
3635 if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
3636 if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
3637 if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
3638 if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
3639 if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
3640 if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
3641 /* possibly indicating MM/HE and MM/HE-E on older chips... */
3642 if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
3643 if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
3644 if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
3645 if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
3646 if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
3647 if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
3648 for (unsigned iBit = 14; iBit < 32; iBit++)
3649 if (uEdxGst & RT_BIT(iBit))
3650 pHlp->pfnPrintf(pHlp, " %d", iBit);
3651 pHlp->pfnPrintf(pHlp, "\n");
3652 }
3653 else
3654 {
3655 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
3656 pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
3657 pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
3658 pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
3659 pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
3660 pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
3661 pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
3662 pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
3663 pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
3664 /* possibly indicating MM/HE and MM/HE-E on older chips... */
3665 pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
3666 pHlp->pfnPrintf(pHlp, "ACE2-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
3667 pHlp->pfnPrintf(pHlp, "PHE - Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
3668 pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
3669 pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
3670 pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
3671 for (unsigned iBit = 14; iBit < 32; iBit++)
3672 if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
3673 pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", iBit, !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
3674 pHlp->pfnPrintf(pHlp, "\n");
3675 }
3676 }
3677}
3678
3679
3680/**
3681 * Structure used when disassembling instructions in DBGF.
3682 * This is used so the reader function can get the stuff it needs.
3683 */
3684typedef struct CPUMDISASSTATE
3685{
3686 /** Pointer to the CPU structure. */
3687 PDISCPUSTATE pCpu;
3688 /** Pointer to the VM. */
3689 PVM pVM;
3690 /** Pointer to the VMCPU. */
3691 PVMCPU pVCpu;
3692 /** Pointer to the first byte in the segment. */
3693 RTGCUINTPTR GCPtrSegBase;
3694 /** Pointer to the byte after the end of the segment. (might have wrapped!) */
3695 RTGCUINTPTR GCPtrSegEnd;
3696 /** The size of the segment minus 1. */
3697 RTGCUINTPTR cbSegLimit;
3698 /** Pointer to the current page - R3 Ptr. */
3699 void const *pvPageR3;
3700 /** Pointer to the current page - GC Ptr. */
3701 RTGCPTR pvPageGC;
3702 /** The lock information that PGMPhysReleasePageMappingLock needs. */
3703 PGMPAGEMAPLOCK PageMapLock;
3704 /** Whether the PageMapLock is valid or not. */
3705 bool fLocked;
3706 /** 64 bits mode or not. */
3707 bool f64Bits;
3708} CPUMDISASSTATE, *PCPUMDISASSTATE;
3709
3710
3711/**
3712 * @callback_method_impl{FNDISREADBYTES}
3713 */
3714static DECLCALLBACK(int) cpumR3DisasInstrRead(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
3715{
3716 PCPUMDISASSTATE pState = (PCPUMDISASSTATE)pDis->pvUser;
3717 for (;;)
3718 {
3719 RTGCUINTPTR GCPtr = pDis->uInstrAddr + offInstr + pState->GCPtrSegBase;
3720
3721 /*
3722 * Need to update the page translation?
3723 */
3724 if ( !pState->pvPageR3
3725 || (GCPtr >> PAGE_SHIFT) != (pState->pvPageGC >> PAGE_SHIFT))
3726 {
3727 int rc = VINF_SUCCESS;
3728
3729 /* translate the address */
3730 pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
3731 if ( MMHyperIsInsideArea(pState->pVM, pState->pvPageGC)
3732 && !HWACCMIsEnabled(pState->pVM))
3733 {
3734 pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->pvPageGC);
3735 if (!pState->pvPageR3)
3736 rc = VERR_INVALID_POINTER;
3737 }
3738 else
3739 {
3740 /* Release mapping lock previously acquired. */
3741 if (pState->fLocked)
3742 PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
3743 rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVCpu, pState->pvPageGC, &pState->pvPageR3, &pState->PageMapLock);
3744 pState->fLocked = RT_SUCCESS_NP(rc);
3745 }
3746 if (RT_FAILURE(rc))
3747 {
3748 pState->pvPageR3 = NULL;
3749 return rc;
3750 }
3751 }
3752
3753 /*
3754 * Check the segment limit.
3755 */
3756 if (!pState->f64Bits && pDis->uInstrAddr + offInstr > pState->cbSegLimit)
3757 return VERR_OUT_OF_SELECTOR_BOUNDS;
3758
3759 /*
3760 * Calc how much we can read.
3761 */
3762 uint32_t cb = PAGE_SIZE - (GCPtr & PAGE_OFFSET_MASK);
3763 if (!pState->f64Bits)
3764 {
3765 RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
3766 if (cb > cbSeg && cbSeg)
3767 cb = cbSeg;
3768 }
3769 if (cb > cbMaxRead)
3770 cb = cbMaxRead;
3771
3772 /*
3773 * Read and advance or exit.
3774 */
3775 memcpy(&pDis->abInstr[offInstr], (uint8_t *)pState->pvPageR3 + (GCPtr & PAGE_OFFSET_MASK), cb);
3776 offInstr += (uint8_t)cb;
3777 if (cb >= cbMinRead)
3778 {
3779 pDis->cbCachedInstr = offInstr;
3780 return VINF_SUCCESS;
3781 }
3782 cbMinRead -= (uint8_t)cb;
3783 cbMaxRead -= (uint8_t)cb;
3784 }
3785}
3786
3787
3788/**
3789 * Disassemble an instruction and return the information in the provided structure.
3790 *
3791 * @returns VBox status code.
3792 * @param pVM Pointer to the VM.
3793 * @param pVCpu Pointer to the VMCPU.
3794 * @param pCtx Pointer to the guest CPU context.
3795 * @param GCPtrPC Program counter (relative to CS) to disassemble from.
3796 * @param pCpu Disassembly state.
3797 * @param pszPrefix String prefix for logging (debug only).
3798 *
3799 */
3800VMMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPTR GCPtrPC, PDISCPUSTATE pCpu, const char *pszPrefix)
3801{
3802 CPUMDISASSTATE State;
3803 int rc;
3804
3805 const PGMMODE enmMode = PGMGetGuestMode(pVCpu);
3806 State.pCpu = pCpu;
3807 State.pvPageGC = 0;
3808 State.pvPageR3 = NULL;
3809 State.pVM = pVM;
3810 State.pVCpu = pVCpu;
3811 State.fLocked = false;
3812 State.f64Bits = false;
3813
3814 /*
3815 * Get selector information.
3816 */
3817 DISCPUMODE enmDisCpuMode;
3818 if ( (pCtx->cr0 & X86_CR0_PE)
3819 && pCtx->eflags.Bits.u1VM == 0)
3820 {
3821 if (CPUMAreHiddenSelRegsValid(pVCpu))
3822 {
3823 State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->cs.Attr.n.u1Long;
3824 State.GCPtrSegBase = pCtx->cs.u64Base;
3825 State.GCPtrSegEnd = pCtx->cs.u32Limit + 1 + (RTGCUINTPTR)pCtx->cs.u64Base;
3826 State.cbSegLimit = pCtx->cs.u32Limit;
3827 enmDisCpuMode = (State.f64Bits)
3828 ? DISCPUMODE_64BIT
3829 : pCtx->cs.Attr.n.u1DefBig
3830 ? DISCPUMODE_32BIT
3831 : DISCPUMODE_16BIT;
3832 }
3833 else
3834 {
3835 DBGFSELINFO SelInfo;
3836
3837 rc = SELMR3GetShadowSelectorInfo(pVM, pCtx->cs.Sel, &SelInfo);
3838 if (RT_FAILURE(rc))
3839 {
3840 AssertMsgFailed(("SELMR3GetShadowSelectorInfo failed for %04X:%RGv rc=%d\n", pCtx->cs.Sel, GCPtrPC, rc));
3841 return rc;
3842 }
3843
3844 /*
3845 * Validate the selector.
3846 */
3847 rc = DBGFR3SelInfoValidateCS(&SelInfo, pCtx->ss.Sel);
3848 if (RT_FAILURE(rc))
3849 {
3850 AssertMsgFailed(("SELMSelInfoValidateCS failed for %04X:%RGv rc=%d\n", pCtx->cs.Sel, GCPtrPC, rc));
3851 return rc;
3852 }
3853 State.GCPtrSegBase = SelInfo.GCPtrBase;
3854 State.GCPtrSegEnd = SelInfo.cbLimit + 1 + (RTGCUINTPTR)SelInfo.GCPtrBase;
3855 State.cbSegLimit = SelInfo.cbLimit;
3856 enmDisCpuMode = SelInfo.u.Raw.Gen.u1DefBig ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
3857 }
3858 }
3859 else
3860 {
3861 /* real or V86 mode */
3862 enmDisCpuMode = DISCPUMODE_16BIT;
3863 State.GCPtrSegBase = pCtx->cs.Sel * 16;
3864 State.GCPtrSegEnd = 0xFFFFFFFF;
3865 State.cbSegLimit = 0xFFFFFFFF;
3866 }
3867
3868 /*
3869 * Disassemble the instruction.
3870 */
3871 uint32_t cbInstr;
3872#ifndef LOG_ENABLED
3873 rc = DISInstrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State, pCpu, &cbInstr);
3874 if (RT_SUCCESS(rc))
3875 {
3876#else
3877 char szOutput[160];
3878 rc = DISInstrToStrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State,
3879 pCpu, &cbInstr, szOutput, sizeof(szOutput));
3880 if (RT_SUCCESS(rc))
3881 {
3882 /* log it */
3883 if (pszPrefix)
3884 Log(("%s-CPU%d: %s", pszPrefix, pVCpu->idCpu, szOutput));
3885 else
3886 Log(("%s", szOutput));
3887#endif
3888 rc = VINF_SUCCESS;
3889 }
3890 else
3891 Log(("CPUMR3DisasmInstrCPU: DISInstr failed for %04X:%RGv rc=%Rrc\n", pCtx->cs.Sel, GCPtrPC, rc));
3892
3893 /* Release mapping lock acquired in cpumR3DisasInstrRead. */
3894 if (State.fLocked)
3895 PGMPhysReleasePageMappingLock(pVM, &State.PageMapLock);
3896
3897 return rc;
3898}
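
/*
 * Usage sketch (hypothetical caller, not part of this file): disassemble the
 * instruction at the current guest CS:RIP.  Assumes a valid pVM/pVCpu pair;
 * CPUMQueryGuestCtxPtr returns the active guest context.
 *
 *     DISCPUSTATE Cpu;
 *     PCPUMCTX    pCtx = CPUMQueryGuestCtxPtr(pVCpu);
 *     int rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "DBG");
 *     if (RT_SUCCESS(rc))
 *         ... use the decoded state in Cpu; debug builds have logged it ...
 */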
3899
3900
3901
3902/**
3903 * API for controlling a few of the CPU features found in CR4.
3904 *
3905 * Currently only X86_CR4_TSD is accepted as input.
3906 *
3907 * @returns VBox status code.
3908 *
3909 * @param pVM Pointer to the VM.
3910 * @param fOr The CR4 OR mask.
3911 * @param fAnd The CR4 AND mask.
3912 */
3913VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd)
3914{
3915 AssertMsgReturn(!(fOr & ~(X86_CR4_TSD)), ("%#x\n", fOr), VERR_INVALID_PARAMETER);
3916 AssertMsgReturn((fAnd & ~(X86_CR4_TSD)) == ~(X86_CR4_TSD), ("%#x\n", fAnd), VERR_INVALID_PARAMETER);
3917
3918 pVM->cpum.s.CR4.OrMask &= fAnd;
3919 pVM->cpum.s.CR4.OrMask |= fOr;
3920
3921 return VINF_SUCCESS;
3922}
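
/*
 * Usage sketch (illustrative): per the asserts above only X86_CR4_TSD may
 * appear in either mask.  To force CR4.TSD on for the guest (making RDTSC
 * privileged) and later to clear it again:
 *
 *     rc = CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~(RTHCUINTREG)0);
 *     ...
 *     rc = CPUMR3SetCR4Feature(pVM, 0, ~(RTHCUINTREG)X86_CR4_TSD);
 */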
3923
3924
3925/**
3926 * Gets a pointer to the array of standard CPUID leaves.
3927 *
3928 * CPUMR3GetGuestCpuIdStdMax() gives the size of the array.
3929 *
3930 * @returns Pointer to the standard CPUID leaves (read-only).
3931 * @param pVM Pointer to the VM.
3932 * @remark Intended for PATM.
3933 */
3934VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdStdRCPtr(PVM pVM)
3935{
3936    return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
3937}
3938
3939
3940/**
3941 * Gets a pointer to the array of extended CPUID leaves.
3942 *
3943 * CPUMGetGuestCpuIdExtMax() gives the size of the array.
3944 *
3945 * @returns Pointer to the extended CPUID leaves (read-only).
3946 * @param pVM Pointer to the VM.
3947 * @remark Intended for PATM.
3948 */
3949VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdExtRCPtr(PVM pVM)
3950{
3951 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
3952}
3953
3954
3955/**
3956 * Gets a pointer to the array of centaur CPUID leaves.
3957 *
3958 * CPUMGetGuestCpuIdCentaurMax() gives the size of the array.
3959 *
3960 * @returns Pointer to the centaur CPUID leaves (read-only).
3961 * @param pVM Pointer to the VM.
3962 * @remark Intended for PATM.
3963 */
3964VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdCentaurRCPtr(PVM pVM)
3965{
3966 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
3967}
3968
3969
3970/**
3971 * Gets a pointer to the default CPUID leaf.
3972 *
3973 * @returns Pointer to the default CPUID leaf (read-only).
3974 * @param pVM Pointer to the VM.
3975 * @remark Intended for PATM.
3976 */
3977VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdDefRCPtr(PVM pVM)
3978{
3979 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
3980}
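
/*
 * Usage sketch (hypothetical): the four getters above return raw-mode
 * context (RC) addresses, so ring-3 code may fetch them and hand them to RC
 * code (e.g. PATM-generated patches) but must not dereference them itself:
 *
 *     RCPTRTYPE(PCCPUMCPUID) pStdLeavesRC = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
 *     ... valid inside the hypervisor area only; do not touch in ring-3 ...
 */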
3981
3982
3983/**
3984 * Transforms the guest CPU state to raw-ring mode.
3985 *
3986 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
3987 *
3988 * @returns VBox status code. (recompiler failure)
3989 * @param pVCpu Pointer to the VMCPU.
3990 * @param pCtxCore The context core (for trap usage).
3991 * @see @ref pg_raw
3992 */
3993VMMR3DECL(int) CPUMR3RawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
3994{
3995 PVM pVM = pVCpu->CTX_SUFF(pVM);
3996
3997 Assert(!pVCpu->cpum.s.fRawEntered);
3998 Assert(!pVCpu->cpum.s.fRemEntered);
3999 if (!pCtxCore)
4000 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
4001
4002 /*
4003 * Are we in Ring-0?
4004 */
4005 if ( pCtxCore->ss.Sel && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
4006 && !pCtxCore->eflags.Bits.u1VM)
4007 {
4008 /*
4009 * Enter execution mode.
4010 */
4011 PATMRawEnter(pVM, pCtxCore);
4012
4013 /*
4014 * Set CPL to Ring-1.
4015 */
4016 pCtxCore->ss.Sel |= 1;
4017 if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
4018 pCtxCore->cs.Sel |= 1;
4019 }
4020 else
4021 {
4022 AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
4023 ("ring-1 code not supported\n"));
4024 /*
4025 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
4026 */
4027 PATMRawEnter(pVM, pCtxCore);
4028 }
4029
4030 /*
4031 * Invalidate the hidden registers.
4032 */
4033 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;
4034
4035 /*
4036 * Assert sanity.
4037 */
4038 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
4039 AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL)
4040 || pCtxCore->eflags.Bits.u1VM,
4041 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
4042 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
4043
4044 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
4045
4046 pVCpu->cpum.s.fRawEntered = true;
4047 return VINF_SUCCESS;
4048}
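
/*
 * Worked example (illustrative values): a guest in ring-0 with ss=0x0008 and
 * cs=0x0008 (RPL=0) leaves CPUMR3RawEnter with ss=0x0009 and cs=0x0009
 * (RPL=1), so the hardware runs the code in ring 1; CPUMR3RawLeave below
 * masks X86_SEL_RPL off again, restoring 0x0008.
 */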
4049
4050
4051/**
4052 * Transforms the guest CPU state from raw-ring mode to correct values.
4053 *
4054 * This function will change any selector registers with DPL=1 to DPL=0.
4055 *
4056 * @returns Adjusted rc.
4057 * @param pVCpu Pointer to the VMCPU.
4058 * @param rc Raw mode return code
4059 * @param pCtxCore The context core (for trap usage).
4060 * @see @ref pg_raw
4061 */
4062VMMR3DECL(int) CPUMR3RawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
4063{
4064 PVM pVM = pVCpu->CTX_SUFF(pVM);
4065
4066 /*
4067 * Don't leave if we've already left (in GC).
4068 */
4069 Assert(pVCpu->cpum.s.fRawEntered);
4070 Assert(!pVCpu->cpum.s.fRemEntered);
4071 if (!pVCpu->cpum.s.fRawEntered)
4072 return rc;
4073 pVCpu->cpum.s.fRawEntered = false;
4074
4075 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
4076 if (!pCtxCore)
4077 pCtxCore = CPUMCTX2CORE(pCtx);
4078 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss.Sel & X86_SEL_RPL));
4079 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL),
4080 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
4081
4082 /*
4083 * Are we executing in raw ring-1?
4084 */
4085 if ( (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
4086 && !pCtxCore->eflags.Bits.u1VM)
4087 {
4088 /*
4089 * Leave execution mode.
4090 */
4091 PATMRawLeave(pVM, pCtxCore, rc);
4092 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
4093 /** @todo See what happens if we remove this. */
4094 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
4095 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
4096 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
4097 pCtxCore->es.Sel &= ~X86_SEL_RPL;
4098 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
4099 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
4100 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
4101 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
4102
4103 /*
4104 * Ring-1 selector => Ring-0.
4105 */
4106 pCtxCore->ss.Sel &= ~X86_SEL_RPL;
4107 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
4108 pCtxCore->cs.Sel &= ~X86_SEL_RPL;
4109 }
4110 else
4111 {
4112 /*
4113 * PATM is taking care of the IOPL and IF flags for us.
4114 */
4115 PATMRawLeave(pVM, pCtxCore, rc);
4116 if (!pCtxCore->eflags.Bits.u1VM)
4117 {
4118 /** @todo See what happens if we remove this. */
4119 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
4120 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
4121 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
4122 pCtxCore->es.Sel &= ~X86_SEL_RPL;
4123 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
4124 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
4125 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
4126 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
4127 }
4128 }
4129
4130 return rc;
4131}
4132
4133
4134/**
4135 * Enters REM, gets and resets the changed flags (CPUM_CHANGED_*).
4136 *
4137 * Only REM should ever call this function!
4138 *
4139 * @returns The changed flags.
4140 * @param pVCpu Pointer to the VMCPU.
4141 * @param puCpl Where to return the current privilege level (CPL).
4142 */
4143VMMR3DECL(uint32_t) CPUMR3RemEnter(PVMCPU pVCpu, uint32_t *puCpl)
4144{
4145 Assert(!pVCpu->cpum.s.fRawEntered);
4146 Assert(!pVCpu->cpum.s.fRemEntered);
4147
4148 /*
4149 * Get the CPL first.
4150 */
4151 *puCpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.s.Guest));
4152
4153 /*
4154 * Get and reset the flags, leaving CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID set.
4155 */
4156 uint32_t fFlags = pVCpu->cpum.s.fChanged;
4157 pVCpu->cpum.s.fChanged &= CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID; /* leave it set */
4158
4159 /** @todo change the switcher to use the fChanged flags. */
4160 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
4161 {
4162 fFlags |= CPUM_CHANGED_FPU_REM;
4163 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
4164 }
4165
4166 pVCpu->cpum.s.fRemEntered = true;
4167 return fFlags;
4168}
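
/*
 * Usage sketch (hypothetical REM-side caller): fetch the accumulated change
 * flags and current CPL on entry and resync only what actually changed.
 *
 *     uint32_t uCpl;
 *     uint32_t fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
 *     if (fFlags & CPUM_CHANGED_FPU_REM)
 *         ... reload the guest FPU state into the recompiler ...
 */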
4169
4170
4171/**
4172 * Leaves REM and updates the CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID flag.
4173 *
4174 * @param pVCpu Pointer to the VMCPU.
4175 * @param fNoOutOfSyncSels This is @c false if there are out of sync
4176 * registers.
4177 */
4178VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels)
4179{
4180 Assert(!pVCpu->cpum.s.fRawEntered);
4181 Assert(pVCpu->cpum.s.fRemEntered);
4182
4183 if (fNoOutOfSyncSels)
4184 pVCpu->cpum.s.fChanged &= ~CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;
4185 else
4186        pVCpu->cpum.s.fChanged |= CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID; /* sel. regs are out of sync */
4187
4188 pVCpu->cpum.s.fRemEntered = false;
4189}
4190