VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUM.cpp@46142

Last change on this file since 46142 was 46142, checked in by vboxsync, 12 years ago

VMM/CPUM: show all CPUID leaves in the log, not only the leaves we provide to the guest

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 215.1 KB
/* $Id: CPUM.cpp 46142 2013-05-17 13:06:57Z vboxsync $ */
/** @file
 * CPUM - CPU Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_cpum CPUM - CPU Monitor / Manager
 *
 * The CPU Monitor / Manager keeps track of all the CPU registers. It is
 * also responsible for lazy FPU handling and some of the context loading
 * in raw mode.
 *
 * There are three CPU contexts; the most important one is the guest one (GC).
 * When running in raw-mode (RC) there is a special hyper context for the VMM
 * part that floats around inside the guest address space. When running in
 * raw-mode, CPUM also maintains a host context for saving and restoring
 * registers across world switches. This latter is done in cooperation with the
 * world switcher (@see pg_vmm).
 *
 * @see grp_cpum
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/vmm/cpumctx-v1_6.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/ssm.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>

#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
#include <iprt/mp.h>
#include <iprt/cpuset.h>
#include "internal/pgm.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The current saved state version. */
#define CPUM_SAVED_STATE_VERSION                14
/** The current saved state version before using SSMR3PutStruct. */
#define CPUM_SAVED_STATE_VERSION_MEM            13
/** The saved state version before introducing the MSR size field. */
#define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE    12
/** The saved state version of 3.2, 3.1 and 3.3 trunk before the hidden
 * selector register change (CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID). */
#define CPUM_SAVED_STATE_VERSION_VER3_2         11
/** The saved state version of 3.0 and 3.1 trunk before the teleportation
 * changes. */
#define CPUM_SAVED_STATE_VERSION_VER3_0         10
/** The saved state version for the 2.1 trunk before the MSR changes. */
#define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR   9
/** The saved state version of 2.0, used for backwards compatibility. */
#define CPUM_SAVED_STATE_VERSION_VER2_0         8
/** The saved state version of 1.6, used for backwards compatibility. */
#define CPUM_SAVED_STATE_VERSION_VER1_6         6


/**
 * This was used in the saved state up to the early life of version 14.
 *
 * It indicates that we may have some out-of-sync hidden segment registers.
 * It is only relevant for raw-mode.
 */
#define CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID    RT_BIT(12)


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/

/**
 * What kind of cpu info dump to perform.
 */
typedef enum CPUMDUMPTYPE
{
    CPUMDUMPTYPE_TERSE,
    CPUMDUMPTYPE_DEFAULT,
    CPUMDUMPTYPE_VERBOSE
} CPUMDUMPTYPE;
/** Pointer to a cpu info dump type. */
typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
static int cpumR3CpuIdInit(PVM pVM);
static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Saved state field descriptors for CPUMCTX. */
static const SSMFIELD g_aCpumCtxFields[] =
{
    SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
    SSMFIELD_ENTRY( CPUMCTX, rdi),
    SSMFIELD_ENTRY( CPUMCTX, rsi),
    SSMFIELD_ENTRY( CPUMCTX, rbp),
    SSMFIELD_ENTRY( CPUMCTX, rax),
    SSMFIELD_ENTRY( CPUMCTX, rbx),
    SSMFIELD_ENTRY( CPUMCTX, rdx),
    SSMFIELD_ENTRY( CPUMCTX, rcx),
    SSMFIELD_ENTRY( CPUMCTX, rsp),
    SSMFIELD_ENTRY( CPUMCTX, rflags),
    SSMFIELD_ENTRY( CPUMCTX, rip),
    SSMFIELD_ENTRY( CPUMCTX, r8),
    SSMFIELD_ENTRY( CPUMCTX, r9),
    SSMFIELD_ENTRY( CPUMCTX, r10),
    SSMFIELD_ENTRY( CPUMCTX, r11),
    SSMFIELD_ENTRY( CPUMCTX, r12),
    SSMFIELD_ENTRY( CPUMCTX, r13),
    SSMFIELD_ENTRY( CPUMCTX, r14),
    SSMFIELD_ENTRY( CPUMCTX, r15),
    SSMFIELD_ENTRY( CPUMCTX, es.Sel),
    SSMFIELD_ENTRY( CPUMCTX, es.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, es.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, es.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, es.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
    SSMFIELD_ENTRY( CPUMCTX, cs.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, cs.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, cs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
    SSMFIELD_ENTRY( CPUMCTX, ss.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, ss.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, ss.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
    SSMFIELD_ENTRY( CPUMCTX, ds.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, ds.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, ds.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
    SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
    SSMFIELD_ENTRY( CPUMCTX, fs.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, fs.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, fs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
    SSMFIELD_ENTRY( CPUMCTX, gs.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, gs.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, gs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cr0),
    SSMFIELD_ENTRY( CPUMCTX, cr2),
    SSMFIELD_ENTRY( CPUMCTX, cr3),
    SSMFIELD_ENTRY( CPUMCTX, cr4),
    SSMFIELD_ENTRY( CPUMCTX, dr[0]),
    SSMFIELD_ENTRY( CPUMCTX, dr[1]),
    SSMFIELD_ENTRY( CPUMCTX, dr[2]),
    SSMFIELD_ENTRY( CPUMCTX, dr[3]),
    SSMFIELD_ENTRY( CPUMCTX, dr[6]),
    SSMFIELD_ENTRY( CPUMCTX, dr[7]),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.pGdt),
    SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
    SSMFIELD_ENTRY( CPUMCTX, idtr.pIdt),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
    SSMFIELD_ENTRY( CPUMCTX, msrEFER),
    SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrPAT),
    SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
    SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
    SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
    SSMFIELD_ENTRY( CPUMCTX, tr.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, tr.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, tr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
    SSMFIELD_ENTRY_TERM()
};

/** Saved state field descriptors for CPUMCTX in V4.1 before the hidden selector
 * registers changed. */
static const SSMFIELD g_aCpumCtxFieldsMem[] =
{
    SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
    SSMFIELD_ENTRY_IGNORE( CPUMCTX, fpu.au32RsrvdRest),
    SSMFIELD_ENTRY( CPUMCTX, rdi),
    SSMFIELD_ENTRY( CPUMCTX, rsi),
    SSMFIELD_ENTRY( CPUMCTX, rbp),
    SSMFIELD_ENTRY( CPUMCTX, rax),
    SSMFIELD_ENTRY( CPUMCTX, rbx),
    SSMFIELD_ENTRY( CPUMCTX, rdx),
    SSMFIELD_ENTRY( CPUMCTX, rcx),
    SSMFIELD_ENTRY( CPUMCTX, rsp),
    SSMFIELD_ENTRY_OLD( lss_esp, sizeof(uint32_t)),
    SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
    SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
    SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
    SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, es.Sel),
    SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
    SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
    SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
    SSMFIELD_ENTRY( CPUMCTX, rflags),
    SSMFIELD_ENTRY( CPUMCTX, rip),
    SSMFIELD_ENTRY( CPUMCTX, r8),
    SSMFIELD_ENTRY( CPUMCTX, r9),
    SSMFIELD_ENTRY( CPUMCTX, r10),
    SSMFIELD_ENTRY( CPUMCTX, r11),
    SSMFIELD_ENTRY( CPUMCTX, r12),
    SSMFIELD_ENTRY( CPUMCTX, r13),
    SSMFIELD_ENTRY( CPUMCTX, r14),
    SSMFIELD_ENTRY( CPUMCTX, r15),
    SSMFIELD_ENTRY( CPUMCTX, es.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, es.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ss.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ds.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
    SSMFIELD_ENTRY( CPUMCTX, fs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, gs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cr0),
    SSMFIELD_ENTRY( CPUMCTX, cr2),
    SSMFIELD_ENTRY( CPUMCTX, cr3),
    SSMFIELD_ENTRY( CPUMCTX, cr4),
    SSMFIELD_ENTRY( CPUMCTX, dr[0]),
    SSMFIELD_ENTRY( CPUMCTX, dr[1]),
    SSMFIELD_ENTRY( CPUMCTX, dr[2]),
    SSMFIELD_ENTRY( CPUMCTX, dr[3]),
    SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, dr[6]),
    SSMFIELD_ENTRY( CPUMCTX, dr[7]),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.pGdt),
    SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
    SSMFIELD_ENTRY( CPUMCTX, idtr.pIdt),
    SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
    SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
    SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
    SSMFIELD_ENTRY( CPUMCTX, msrEFER),
    SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrPAT),
    SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
    SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
    SSMFIELD_ENTRY( CPUMCTX, tr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
    SSMFIELD_ENTRY_TERM()
};

/** Saved state field descriptors for CPUMCTX_VER1_6. */
static const SSMFIELD g_aCpumCtxFieldsV16[] =
{
    SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
    SSMFIELD_ENTRY_IGNORE( CPUMCTX, fpu.au32RsrvdRest),
    SSMFIELD_ENTRY( CPUMCTX, rdi),
    SSMFIELD_ENTRY( CPUMCTX, rsi),
    SSMFIELD_ENTRY( CPUMCTX, rbp),
    SSMFIELD_ENTRY( CPUMCTX, rax),
    SSMFIELD_ENTRY( CPUMCTX, rbx),
    SSMFIELD_ENTRY( CPUMCTX, rdx),
    SSMFIELD_ENTRY( CPUMCTX, rcx),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, rsp),
    SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
    SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( CPUMCTX, sizeof(uint64_t) /*rsp_notused*/),
    SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
    SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
    SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, es.Sel),
    SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
    SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
    SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
    SSMFIELD_ENTRY( CPUMCTX, rflags),
    SSMFIELD_ENTRY( CPUMCTX, rip),
    SSMFIELD_ENTRY( CPUMCTX, r8),
    SSMFIELD_ENTRY( CPUMCTX, r9),
    SSMFIELD_ENTRY( CPUMCTX, r10),
    SSMFIELD_ENTRY( CPUMCTX, r11),
    SSMFIELD_ENTRY( CPUMCTX, r12),
    SSMFIELD_ENTRY( CPUMCTX, r13),
    SSMFIELD_ENTRY( CPUMCTX, r14),
    SSMFIELD_ENTRY( CPUMCTX, r15),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, es.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, es.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, cs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ss.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ds.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, fs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cr0),
    SSMFIELD_ENTRY( CPUMCTX, cr2),
    SSMFIELD_ENTRY( CPUMCTX, cr3),
    SSMFIELD_ENTRY( CPUMCTX, cr4),
    SSMFIELD_ENTRY_OLD( cr8, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, dr[0]),
    SSMFIELD_ENTRY( CPUMCTX, dr[1]),
    SSMFIELD_ENTRY( CPUMCTX, dr[2]),
    SSMFIELD_ENTRY( CPUMCTX, dr[3]),
    SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, dr[6]),
    SSMFIELD_ENTRY( CPUMCTX, dr[7]),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gdtr.pGdt),
    SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( gdtrPadding64, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, idtr.pIdt),
    SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( idtrPadding64, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
    SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
    SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
    SSMFIELD_ENTRY( CPUMCTX, msrEFER),
    SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrPAT),
    SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
    SSMFIELD_ENTRY_OLD( msrFSBASE, sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( msrGSBASE, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ldtr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, tr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
    SSMFIELD_ENTRY_OLD( padding, sizeof(uint32_t)*2),
    SSMFIELD_ENTRY_TERM()
};


/**
 * Initializes the CPUM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3DECL(int) CPUMR3Init(PVM pVM)
{
    LogFlow(("CPUMR3Init\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, cpum.s, 32);
    AssertCompile(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
    AssertCompileSizeAlignment(CPUMCTX, 64);
    AssertCompileSizeAlignment(CPUMCTXMSRS, 64);
    AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
    AssertCompileMemberAlignment(VM, cpum, 64);
    AssertCompileMemberAlignment(VM, aCpus, 64);
    AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
    AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64);

    /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */
    pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum);
    Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.offCPUMCPU0 == (uintptr_t)&pVM->aCpus[0].cpum);

    /* Calculate the offset from CPUMCPU to CPUM. */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->cpum.s.offCPUM = RT_OFFSETOF(VM, aCpus[i].cpum) - RT_OFFSETOF(VM, cpum);
        Assert((uintptr_t)&pVCpu->cpum - pVCpu->cpum.s.offCPUM == (uintptr_t)&pVM->cpum);
    }
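    /* These relative offsets are presumably what lets code that holds only one
     * of the two structure addresses (e.g. the world switcher) reach the other
     * without a VM pointer: CPUMCPU = CPUM + offCPUMCPU0 and
     * CPUM = CPUMCPU - offCPUM, exactly the relations the asserts above check. */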

    /*
     * Check that the CPU supports the minimum features we require.
     */
    if (!ASMHasCpuId())
    {
        Log(("The CPU doesn't support CPUID!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    ASMCpuId_ECX_EDX(1, &pVM->cpum.s.CPUFeatures.ecx, &pVM->cpum.s.CPUFeatures.edx);
    ASMCpuId_ECX_EDX(0x80000001, &pVM->cpum.s.CPUFeaturesExt.ecx, &pVM->cpum.s.CPUFeaturesExt.edx);

    /* Setup the CR4 AND and OR masks used in the switcher */
    /* Depends on the presence of FXSAVE(SSE) support on the host CPU */
    if (!pVM->cpum.s.CPUFeatures.edx.u1FXSR)
    {
        Log(("The CPU doesn't support FXSAVE/FXRSTOR!\n"));
        /* No FXSAVE implies no SSE */
        pVM->cpum.s.CR4.AndMask = X86_CR4_PVI | X86_CR4_VME;
        pVM->cpum.s.CR4.OrMask  = 0;
    }
    else
    {
        pVM->cpum.s.CR4.AndMask = X86_CR4_OSXMMEEXCPT | X86_CR4_PVI | X86_CR4_VME;
        pVM->cpum.s.CR4.OrMask  = X86_CR4_OSFSXR;
    }

    if (!pVM->cpum.s.CPUFeatures.edx.u1MMX)
    {
        Log(("The CPU doesn't support MMX!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    if (!pVM->cpum.s.CPUFeatures.edx.u1TSC)
    {
        Log(("The CPU doesn't support TSC!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    /* Bogus on AMD? */
    if (!pVM->cpum.s.CPUFeatures.edx.u1SEP)
        Log(("The CPU doesn't support SYSENTER/SYSEXIT!\n"));

    /*
     * Detect the host CPU vendor.
     * (The guest CPU vendor is re-detected later on.)
     */
    uint32_t uEAX, uEBX, uECX, uEDX;
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    pVM->cpum.s.enmHostCpuVendor  = cpumR3DetectVendor(uEAX, uEBX, uECX, uEDX);
    pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor;

    /*
     * Setup hypervisor startup values.
     */

    /*
     * Register saved state data item.
     */
    int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
                                   NULL, cpumR3LiveExec, NULL,
                                   NULL, cpumR3SaveExec, NULL,
                                   cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Register info handlers and registers with the debugger facility.
     */
637 DBGFR3InfoRegisterInternal(pVM, "cpum", "Displays the all the cpu states.", &cpumR3InfoAll);
638 DBGFR3InfoRegisterInternal(pVM, "cpumguest", "Displays the guest cpu state.", &cpumR3InfoGuest);
639 DBGFR3InfoRegisterInternal(pVM, "cpumhyper", "Displays the hypervisor cpu state.", &cpumR3InfoHyper);
640 DBGFR3InfoRegisterInternal(pVM, "cpumhost", "Displays the host cpu state.", &cpumR3InfoHost);
641 DBGFR3InfoRegisterInternal(pVM, "cpuid", "Displays the guest cpuid leaves.", &cpumR3CpuIdInfo);
642 DBGFR3InfoRegisterInternal(pVM, "cpumguestinstr", "Displays the current guest instruction.", &cpumR3InfoGuestInstr);
643
644 rc = cpumR3DbgInit(pVM);
645 if (RT_FAILURE(rc))
646 return rc;
647
648 /*
649 * Initialize the Guest CPUID state.
650 */
651 rc = cpumR3CpuIdInit(pVM);
652 if (RT_FAILURE(rc))
653 return rc;
654 CPUMR3Reset(pVM);
655 return VINF_SUCCESS;
656}
657
658
659/**
 * Detects the CPU vendor given the CPUID(0) results.
 *
 * @returns The vendor.
 * @param   uEAX    EAX from CPUID(0).
 * @param   uEBX    EBX from CPUID(0).
 * @param   uECX    ECX from CPUID(0).
 * @param   uEDX    EDX from CPUID(0).
 */
static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
    if (ASMIsValidStdRange(uEAX))
    {
        if (ASMIsAmdCpuEx(uEBX, uECX, uEDX))
            return CPUMCPUVENDOR_AMD;

        if (ASMIsIntelCpuEx(uEBX, uECX, uEDX))
            return CPUMCPUVENDOR_INTEL;

        if (ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX))
            return CPUMCPUVENDOR_VIA;

        /** @todo detect the other buggers... */
    }

    return CPUMCPUVENDOR_UNKNOWN;
}


/**
 * Fetches overrides for a CPUID leaf.
 *
 * @returns VBox status code.
 * @param   pLeaf       The leaf to load the overrides into.
 * @param   pCfgNode    The CFGM node containing the overrides
 *                      (/CPUM/HostCPUID/ or /CPUM/CPUID/).
 * @param   iLeaf       The CPUID leaf number.
 */
static int cpumR3CpuIdFetchLeafOverride(PCPUMCPUID pLeaf, PCFGMNODE pCfgNode, uint32_t iLeaf)
{
    PCFGMNODE pLeafNode = CFGMR3GetChildF(pCfgNode, "%RX32", iLeaf);
    if (pLeafNode)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->eax = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->ebx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->ecx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "edx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->edx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

    }
    return VINF_SUCCESS;
}
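
/* For example (hypothetical values), a CFGM tree such as
 *     /CPUM/HostCPUID/80000001/ecx = 0x00000000
 * is picked up here: the leaf node is matched by its hexadecimal ("%RX32")
 * name, and each of the eax/ebx/ecx/edx values is optional. */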


/**
 * Load the overrides for a set of CPUID leaves.
 *
 * @returns VBox status code.
 * @param   paLeaves    The leaf array.
 * @param   cLeaves     The number of leaves.
 * @param   uStart      The start leaf number.
 * @param   pCfgNode    The CFGM node containing the overrides
 *                      (/CPUM/HostCPUID/ or /CPUM/CPUID/).
 */
static int cpumR3CpuIdInitLoadOverrideSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
{
    for (uint32_t i = 0; i < cLeaves; i++)
    {
        int rc = cpumR3CpuIdFetchLeafOverride(&paLeaves[i], pCfgNode, uStart + i);
        if (RT_FAILURE(rc))
            return rc;
    }

    return VINF_SUCCESS;
}

/**
 * Init a set of host CPUID leaves.
 *
 * @returns VBox status code.
 * @param   paLeaves    The leaf array.
 * @param   cLeaves     The number of leaves.
 * @param   uStart      The start leaf number.
 * @param   pCfgNode    The /CPUM/HostCPUID/ node.
 */
static int cpumR3CpuIdInitHostSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
{
    /* Using the ECX variant for all of them can't hurt... */
    for (uint32_t i = 0; i < cLeaves; i++)
        ASMCpuId_Idx_ECX(uStart + i, 0, &paLeaves[i].eax, &paLeaves[i].ebx, &paLeaves[i].ecx, &paLeaves[i].edx);

    /* Load CPUID leaf override; we currently don't care if the user
       specifies features the host CPU doesn't support. */
    return cpumR3CpuIdInitLoadOverrideSet(uStart, paLeaves, cLeaves, pCfgNode);
}


/**
 * Initializes the emulated CPU's cpuid information.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
static int cpumR3CpuIdInit(PVM pVM)
{
    PCPUM     pCPUM    = &pVM->cpum.s;
    PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
    uint32_t  i;
    int       rc;

#define PORTABLE_CLEAR_BITS_WHEN(Lvl, LeafSuffReg, FeatNm, fMask, uValue) \
    if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fMask)) == (uValue) ) \
    { \
        LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: %#x -> 0\n", pCPUM->aGuestCpuId##LeafSuffReg & (fMask))); \
        pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fMask); \
    }
#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, LeafSuffReg, FeatNm, fBitMask) \
    if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fBitMask)) ) \
    { \
        LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: 1 -> 0\n")); \
        pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fBitMask); \
    }
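
/* Usage sketch: the macros above are invoked further down as, e.g.,
 *     PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
 * which clears SSSE3 from the guest's leaf 1 ECX (and logs the change) once
 * the configured portability level reaches 1. */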

    /*
     * Read the configuration.
     */
    /** @cfgm{CPUM/SyntheticCpu, boolean, false}
     * Enables the Synthetic CPU. The Vendor ID and Processor Name are
     * completely overridden by VirtualBox custom strings. Some
     * CPUID information is withheld, like the cache info. */
    rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &pCPUM->fSyntheticCpu, false);
    AssertRCReturn(rc, rc);

    /** @cfgm{CPUM/PortableCpuIdLevel, 8-bit, 0, 3, 0}
     * When non-zero, CPUID features that could cause portability issues will be
     * stripped. The higher the value, the more features get stripped. Higher
     * values should only be used when older CPUs are involved, since it may
     * harm performance and may also cause problems with specific guests. */
    rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, 0);
    AssertRCReturn(rc, rc);

    AssertLogRelReturn(!pCPUM->fSyntheticCpu || !pCPUM->u8PortableCpuIdLevel, VERR_CPUM_INCOMPATIBLE_CONFIG);

    /*
     * Get the host CPUID leaves and redetect the guest CPU vendor (could've
     * been overridden).
     */
    /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
     * Overrides the host CPUID leaf values used for calculating the guest CPUID
     * leaves. This can be used to preserve the CPUID values when moving a VM to a
     * different machine. Another use is restricting (or extending) the feature set
     * exposed to the guest. */
    PCFGMNODE pHostOverrideCfg = CFGMR3GetChild(pCpumCfg, "HostCPUID");
    rc = cpumR3CpuIdInitHostSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0], RT_ELEMENTS(pCPUM->aGuestCpuIdStd), pHostOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitHostSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0], RT_ELEMENTS(pCPUM->aGuestCpuIdExt), pHostOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitHostSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pHostOverrideCfg);
    AssertRCReturn(rc, rc);

    pCPUM->enmGuestCpuVendor = cpumR3DetectVendor(pCPUM->aGuestCpuIdStd[0].eax, pCPUM->aGuestCpuIdStd[0].ebx,
                                                  pCPUM->aGuestCpuIdStd[0].ecx, pCPUM->aGuestCpuIdStd[0].edx);

    /*
     * Determine the default leaf.
     *
     * Intel returns values of the highest standard function, while AMD
     * returns zeros. VIA, on the other hand, seems to return nothing or
     * perhaps some random garbage; we don't try to duplicate this behavior.
     */
    ASMCpuId(pCPUM->aGuestCpuIdStd[0].eax + 10, /** @todo r=bird: Use the host value here in case of overrides and more than 10 leaves being stripped already. */
             &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx,
             &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx);

    /** @cfgm{/CPUM/CMPXCHG16B, boolean, false}
     * Expose CMPXCHG16B to the guest if supported by the host.
     */
    bool fCmpXchg16b;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false); AssertRCReturn(rc, rc);

    bool fMonitor;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true); AssertRCReturn(rc, rc);

    /* Cpuid 1 & 0x80000001:
     * Only report features we can support.
     *
     * Note! When enabling new features the Synthetic CPU and Portable CPUID
     *       options may require adjusting (i.e. stripping what was enabled).
     */
    pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU
                                  | X86_CPUID_FEATURE_EDX_VME
                                  | X86_CPUID_FEATURE_EDX_DE
                                  | X86_CPUID_FEATURE_EDX_PSE
                                  | X86_CPUID_FEATURE_EDX_TSC
                                  | X86_CPUID_FEATURE_EDX_MSR
                                  //| X86_CPUID_FEATURE_EDX_PAE - set later if configured.
                                  | X86_CPUID_FEATURE_EDX_MCE
                                  | X86_CPUID_FEATURE_EDX_CX8
                                  //| X86_CPUID_FEATURE_EDX_APIC - set by the APIC device if present.
                                  /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see @bugref{1757}) */
                                  //| X86_CPUID_FEATURE_EDX_SEP
                                  | X86_CPUID_FEATURE_EDX_MTRR
                                  | X86_CPUID_FEATURE_EDX_PGE
                                  | X86_CPUID_FEATURE_EDX_MCA
                                  | X86_CPUID_FEATURE_EDX_CMOV
                                  | X86_CPUID_FEATURE_EDX_PAT
                                  | X86_CPUID_FEATURE_EDX_PSE36
                                  //| X86_CPUID_FEATURE_EDX_PSN - no serial number.
                                  | X86_CPUID_FEATURE_EDX_CLFSH
                                  //| X86_CPUID_FEATURE_EDX_DS - no debug store.
                                  //| X86_CPUID_FEATURE_EDX_ACPI - not virtualized yet.
                                  | X86_CPUID_FEATURE_EDX_MMX
                                  | X86_CPUID_FEATURE_EDX_FXSR
                                  | X86_CPUID_FEATURE_EDX_SSE
                                  | X86_CPUID_FEATURE_EDX_SSE2
                                  //| X86_CPUID_FEATURE_EDX_SS - no self snoop.
                                  //| X86_CPUID_FEATURE_EDX_HTT - no hyperthreading.
                                  //| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
                                  //| X86_CPUID_FEATURE_EDX_PBE - no pending break enabled.
                                  | 0;
    pCPUM->aGuestCpuIdStd[1].ecx &= 0
                                  | X86_CPUID_FEATURE_ECX_SSE3
                                  /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
                                  | ((fMonitor && pVM->cCpus == 1) ? X86_CPUID_FEATURE_ECX_MONITOR : 0)
                                  //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
                                  //| X86_CPUID_FEATURE_ECX_VMX - not virtualized.
                                  //| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
                                  //| X86_CPUID_FEATURE_ECX_TM2 - no thermal monitor 2.
                                  | X86_CPUID_FEATURE_ECX_SSSE3
                                  //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
                                  | (fCmpXchg16b ? X86_CPUID_FEATURE_ECX_CX16 : 0)
                                  /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
                                  //| X86_CPUID_FEATURE_ECX_TPRUPDATE
                                  /* ECX Bit 21 - x2APIC support - not yet. */
                                  //| X86_CPUID_FEATURE_ECX_X2APIC
                                  /* ECX Bit 23 - POPCNT instruction. */
                                  //| X86_CPUID_FEATURE_ECX_POPCNT
                                  | 0;
    if (pCPUM->u8PortableCpuIdLevel > 0)
    {
        PORTABLE_CLEAR_BITS_WHEN(1, Std[1].eax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
        PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
        PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSE3, X86_CPUID_FEATURE_ECX_SSE3);
        PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, CX16, X86_CPUID_FEATURE_ECX_CX16);
        PORTABLE_DISABLE_FEATURE_BIT(2, Std[1].edx, SSE2, X86_CPUID_FEATURE_EDX_SSE2);
        PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, SSE, X86_CPUID_FEATURE_EDX_SSE);
        PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
        PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CMOV, X86_CPUID_FEATURE_EDX_CMOV);

        Assert(!(pCPUM->aGuestCpuIdStd[1].edx & (  X86_CPUID_FEATURE_EDX_SEP
                                                 | X86_CPUID_FEATURE_EDX_PSN
                                                 | X86_CPUID_FEATURE_EDX_DS
                                                 | X86_CPUID_FEATURE_EDX_ACPI
                                                 | X86_CPUID_FEATURE_EDX_SS
                                                 | X86_CPUID_FEATURE_EDX_TM
                                                 | X86_CPUID_FEATURE_EDX_PBE
                                                 )));
        Assert(!(pCPUM->aGuestCpuIdStd[1].ecx & (  X86_CPUID_FEATURE_ECX_PCLMUL
                                                 | X86_CPUID_FEATURE_ECX_DTES64
                                                 | X86_CPUID_FEATURE_ECX_CPLDS
                                                 | X86_CPUID_FEATURE_ECX_VMX
                                                 | X86_CPUID_FEATURE_ECX_SMX
                                                 | X86_CPUID_FEATURE_ECX_EST
                                                 | X86_CPUID_FEATURE_ECX_TM2
                                                 | X86_CPUID_FEATURE_ECX_CNTXID
                                                 | X86_CPUID_FEATURE_ECX_FMA
                                                 | X86_CPUID_FEATURE_ECX_CX16
                                                 | X86_CPUID_FEATURE_ECX_TPRUPDATE
                                                 | X86_CPUID_FEATURE_ECX_PDCM
                                                 | X86_CPUID_FEATURE_ECX_DCA
                                                 | X86_CPUID_FEATURE_ECX_MOVBE
                                                 | X86_CPUID_FEATURE_ECX_AES
                                                 | X86_CPUID_FEATURE_ECX_POPCNT
                                                 | X86_CPUID_FEATURE_ECX_XSAVE
                                                 | X86_CPUID_FEATURE_ECX_OSXSAVE
                                                 | X86_CPUID_FEATURE_ECX_AVX
                                                 )));
    }

    /* Cpuid 0x80000001:
     * Only report features we can support.
     *
     * Note! When enabling new features the Synthetic CPU and Portable CPUID
     *       options may require adjusting (i.e. stripping what was enabled).
     *
     * ASSUMES that this is ALWAYS the AMD defined feature set if present.
     */
    pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU
                                  | X86_CPUID_AMD_FEATURE_EDX_VME
                                  | X86_CPUID_AMD_FEATURE_EDX_DE
                                  | X86_CPUID_AMD_FEATURE_EDX_PSE
                                  | X86_CPUID_AMD_FEATURE_EDX_TSC
                                  | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
                                  //| X86_CPUID_AMD_FEATURE_EDX_PAE - not implemented yet.
                                  //| X86_CPUID_AMD_FEATURE_EDX_MCE - not virtualized yet.
                                  | X86_CPUID_AMD_FEATURE_EDX_CX8
                                  //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
                                  /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see @bugref{1757}) */
                                  //| X86_CPUID_EXT_FEATURE_EDX_SEP
                                  | X86_CPUID_AMD_FEATURE_EDX_MTRR
                                  | X86_CPUID_AMD_FEATURE_EDX_PGE
                                  | X86_CPUID_AMD_FEATURE_EDX_MCA
                                  | X86_CPUID_AMD_FEATURE_EDX_CMOV
                                  | X86_CPUID_AMD_FEATURE_EDX_PAT
                                  | X86_CPUID_AMD_FEATURE_EDX_PSE36
                                  //| X86_CPUID_EXT_FEATURE_EDX_NX - not virtualized, requires PAE.
                                  //| X86_CPUID_AMD_FEATURE_EDX_AXMMX
                                  | X86_CPUID_AMD_FEATURE_EDX_MMX
                                  | X86_CPUID_AMD_FEATURE_EDX_FXSR
                                  | X86_CPUID_AMD_FEATURE_EDX_FFXSR
                                  //| X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
                                  | X86_CPUID_EXT_FEATURE_EDX_RDTSCP
                                  //| X86_CPUID_EXT_FEATURE_EDX_LONG_MODE - turned on when necessary
                                  | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
                                  | X86_CPUID_AMD_FEATURE_EDX_3DNOW
                                  | 0;
    pCPUM->aGuestCpuIdExt[1].ecx &= 0
                                  //| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
                                  //| X86_CPUID_AMD_FEATURE_ECX_CMPL
                                  //| X86_CPUID_AMD_FEATURE_ECX_SVM - not virtualized.
                                  //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                                  /* Note: This could prevent teleporting from AMD to Intel CPUs! */
                                  | X86_CPUID_AMD_FEATURE_ECX_CR8L /* expose lock mov cr0 = mov cr8 hack for guests that can use this feature to access the TPR. */
                                  //| X86_CPUID_AMD_FEATURE_ECX_ABM
                                  //| X86_CPUID_AMD_FEATURE_ECX_SSE4A
                                  //| X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
                                  //| X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
                                  //| X86_CPUID_AMD_FEATURE_ECX_OSVW
                                  //| X86_CPUID_AMD_FEATURE_ECX_IBS
                                  //| X86_CPUID_AMD_FEATURE_ECX_SSE5
                                  //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
                                  //| X86_CPUID_AMD_FEATURE_ECX_WDT
                                  | 0;
    if (pCPUM->u8PortableCpuIdLevel > 0)
    {
        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].ecx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L);
        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
        PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
        PORTABLE_DISABLE_FEATURE_BIT(3, Ext[1].ecx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV);

        Assert(!(pCPUM->aGuestCpuIdExt[1].ecx & (  X86_CPUID_AMD_FEATURE_ECX_CMPL
                                                 | X86_CPUID_AMD_FEATURE_ECX_SVM
                                                 | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                                                 | X86_CPUID_AMD_FEATURE_ECX_CR8L
                                                 | X86_CPUID_AMD_FEATURE_ECX_ABM
                                                 | X86_CPUID_AMD_FEATURE_ECX_SSE4A
                                                 | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
                                                 | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
                                                 | X86_CPUID_AMD_FEATURE_ECX_OSVW
                                                 | X86_CPUID_AMD_FEATURE_ECX_IBS
                                                 | X86_CPUID_AMD_FEATURE_ECX_SSE5
                                                 | X86_CPUID_AMD_FEATURE_ECX_SKINIT
                                                 | X86_CPUID_AMD_FEATURE_ECX_WDT
                                                 | UINT32_C(0xffffc000)
                                                 )));
        Assert(!(pCPUM->aGuestCpuIdExt[1].edx & (  RT_BIT(10)
                                                 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
                                                 | RT_BIT(18)
                                                 | RT_BIT(19)
                                                 | RT_BIT(21)
                                                 | X86_CPUID_AMD_FEATURE_EDX_AXMMX
                                                 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
                                                 | RT_BIT(28)
                                                 )));
    }

    /*
     * Apply the Synthetic CPU modifications. (TODO: move this up)
     */
    if (pCPUM->fSyntheticCpu)
    {
        static const char s_szVendor[13]    = "VirtualBox ";
        static const char s_szProcessor[48] = "VirtualBox SPARCx86 Processor v1000 "; /* includes null terminator */

        pCPUM->enmGuestCpuVendor = CPUMCPUVENDOR_SYNTHETIC;

        /* Limit the nr of standard leaves; 5 for monitor/mwait */
        pCPUM->aGuestCpuIdStd[0].eax = RT_MIN(pCPUM->aGuestCpuIdStd[0].eax, 5);

        /* 0: Vendor */
        pCPUM->aGuestCpuIdStd[0].ebx = pCPUM->aGuestCpuIdExt[0].ebx = ((uint32_t *)s_szVendor)[0];
        pCPUM->aGuestCpuIdStd[0].ecx = pCPUM->aGuestCpuIdExt[0].ecx = ((uint32_t *)s_szVendor)[2];
        pCPUM->aGuestCpuIdStd[0].edx = pCPUM->aGuestCpuIdExt[0].edx = ((uint32_t *)s_szVendor)[1];
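        /* CPUID(0) returns the vendor string in EBX, EDX, ECX order (bytes 0-3
         * in EBX, 4-7 in EDX, 8-11 in ECX), which is why the dword indices
         * above run 0, 2, 1 rather than 0, 1, 2. */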

        /* 1.eax: Version information. family : model : stepping */
        pCPUM->aGuestCpuIdStd[1].eax = (0xf << 8) + (0x1 << 4) + 1;

        /* Leaves 2 - 4 are Intel only - zero them out */
        memset(&pCPUM->aGuestCpuIdStd[2], 0, sizeof(pCPUM->aGuestCpuIdStd[2]));
        memset(&pCPUM->aGuestCpuIdStd[3], 0, sizeof(pCPUM->aGuestCpuIdStd[3]));
        memset(&pCPUM->aGuestCpuIdStd[4], 0, sizeof(pCPUM->aGuestCpuIdStd[4]));

        /* Leaf 5 = monitor/mwait */

        /* Limit the nr of extended leaves: 0x80000008 to include the max virtual and physical address size (64 bits guests). */
        pCPUM->aGuestCpuIdExt[0].eax = RT_MIN(pCPUM->aGuestCpuIdExt[0].eax, 0x80000008);
        /* AMD only - set to zero. */
        pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0;

        /* 0x80000001: shared feature bits are set dynamically. */
        memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1]));

        /* 0x80000002-4: Processor Name String Identifier. */
        pCPUM->aGuestCpuIdExt[2].eax = ((uint32_t *)s_szProcessor)[0];
        pCPUM->aGuestCpuIdExt[2].ebx = ((uint32_t *)s_szProcessor)[1];
        pCPUM->aGuestCpuIdExt[2].ecx = ((uint32_t *)s_szProcessor)[2];
        pCPUM->aGuestCpuIdExt[2].edx = ((uint32_t *)s_szProcessor)[3];
        pCPUM->aGuestCpuIdExt[3].eax = ((uint32_t *)s_szProcessor)[4];
        pCPUM->aGuestCpuIdExt[3].ebx = ((uint32_t *)s_szProcessor)[5];
        pCPUM->aGuestCpuIdExt[3].ecx = ((uint32_t *)s_szProcessor)[6];
        pCPUM->aGuestCpuIdExt[3].edx = ((uint32_t *)s_szProcessor)[7];
        pCPUM->aGuestCpuIdExt[4].eax = ((uint32_t *)s_szProcessor)[8];
        pCPUM->aGuestCpuIdExt[4].ebx = ((uint32_t *)s_szProcessor)[9];
        pCPUM->aGuestCpuIdExt[4].ecx = ((uint32_t *)s_szProcessor)[10];
        pCPUM->aGuestCpuIdExt[4].edx = ((uint32_t *)s_szProcessor)[11];

        /* 0x80000005-7 - reserved -> zero */
        memset(&pCPUM->aGuestCpuIdExt[5], 0, sizeof(pCPUM->aGuestCpuIdExt[5]));
        memset(&pCPUM->aGuestCpuIdExt[6], 0, sizeof(pCPUM->aGuestCpuIdExt[6]));
        memset(&pCPUM->aGuestCpuIdExt[7], 0, sizeof(pCPUM->aGuestCpuIdExt[7]));

        /* 0x80000008: only the max virtual and physical address size. */
        pCPUM->aGuestCpuIdExt[8].ecx = pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */
    }

    /*
     * Hide HTT, multi-core, SMP, whatever.
     * (APIC-ID := 0 and #LogCpus := 0)
     */
    pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
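    /* In leaf 1, EBX[31:24] holds the initial APIC ID and EBX[23:16] the
     * logical processor count; masking with 0x0000ffff zeroes both while
     * keeping the brand index and CLFLUSH line size in the low word. */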
#ifdef VBOX_WITH_MULTI_CORE
    if (   pCPUM->enmGuestCpuVendor != CPUMCPUVENDOR_SYNTHETIC
        && pVM->cCpus > 1)
    {
        /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */
        pCPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCpus << 16);
        pCPUM->aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_HTT; /* necessary for hyper-threading *or* multi-core CPUs */
    }
#endif

    /* Cpuid 2:
     * Intel: Cache and TLB information
     * AMD:   Reserved
     * VIA:   Reserved
     * Safe to expose; restrict the number of calls to 1 for the portable case.
     */
    if (   pCPUM->u8PortableCpuIdLevel > 0
        && pCPUM->aGuestCpuIdStd[0].eax >= 2
        && (pCPUM->aGuestCpuIdStd[2].eax & 0xff) > 1)
    {
        LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCPUM->aGuestCpuIdStd[2].eax & 0xff));
        pCPUM->aGuestCpuIdStd[2].eax &= UINT32_C(0xfffffffe);
    }

    /* Cpuid 3:
     * Intel: EAX, EBX - reserved (transmeta uses these)
     *        ECX, EDX - Processor Serial Number if available, otherwise reserved
     * AMD:   Reserved
     * VIA:   Reserved
     * Safe to expose
     */
    if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN))
    {
        pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0;
        if (pCPUM->u8PortableCpuIdLevel > 0)
            pCPUM->aGuestCpuIdStd[3].eax = pCPUM->aGuestCpuIdStd[3].ebx = 0;
    }

    /* Cpuid 4:
     * Intel: Deterministic Cache Parameters Leaf
     *        Note: Depends on the ECX input! -> Feeling rather lazy now, so we just return 0
     * AMD:   Reserved
     * VIA:   Reserved
     * Safe to expose, except for EAX:
     *      Bits 25-14: Maximum number of addressable IDs for logical processors sharing this cache (see note)**
     *      Bits 31-26: Maximum number of processor cores in this physical package**
     * Note: These SMP values are constant regardless of ECX
     */
    pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0;
    pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0;
#ifdef VBOX_WITH_MULTI_CORE
    if (   pVM->cCpus > 1
        && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
        /* One logical processor with possibly multiple cores. */
        /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
        pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCpus - 1) << 26); /* 6 bits only -> 64 cores! */
    }
#endif

    /* Cpuid 5:     Monitor/mwait Leaf
     * Intel: ECX, EDX - reserved
     *        EAX, EBX - Smallest and largest monitor line size
     * AMD:   EDX - reserved
     *        EAX, EBX - Smallest and largest monitor line size
     *        ECX - extensions (ignored for now)
     * VIA:   Reserved
     * Safe to expose
     */
    if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR))
        pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0;

    pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
    /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
     * Expose MWAIT extended features to the guest. For now we expose
     * just MWAIT break on interrupt feature (bit 1).
     */
    bool fMWaitExtensions;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false); AssertRCReturn(rc, rc);
    if (fMWaitExtensions)
    {
        pCPUM->aGuestCpuIdStd[5].ecx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
        /** @todo For now we just expose the host's MWAIT C-states, although conceptually
         *        it should be part of our power management virtualization model. */
#if 0
        /* MWAIT sub C-states */
        pCPUM->aGuestCpuIdStd[5].edx =
                (0 << 0)  /* 0 in C0 */ |
                (2 << 4)  /* 2 in C1 */ |
                (2 << 8)  /* 2 in C2 */ |
                (2 << 12) /* 2 in C3 */ |
                (0 << 16) /* 0 in C4 */
                ;
#endif
    }
    else
        pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;

    /* Cpuid 0x80000005 & 0x80000006 contain information about L1, L2 & L3 cache and TLB identifiers.
     * Safe to pass on to the guest.
     *
     * Intel: 0x80000005 reserved
     *        0x80000006 L2 cache information
     * AMD:   0x80000005 L1 cache information
     *        0x80000006 L2/L3 cache information
     * VIA:   0x80000005 TLB and L1 cache information
     *        0x80000006 L2 cache information
     */

    /* Cpuid 0x80000007:
     * Intel: Reserved
     * AMD:   EAX, EBX, ECX - reserved
     *        EDX: Advanced Power Management Information
     * VIA:   Reserved
     */
    if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007))
    {
        Assert(pVM->cpum.s.enmGuestCpuVendor != CPUMCPUVENDOR_INVALID);

        pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0;

        if (pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
        {
            /* Only expose the TSC invariant capability bit to the guest. */
            pCPUM->aGuestCpuIdExt[7].edx &= 0
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_TS
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_FID
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_VID
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_TM
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_STC
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_MC
                                          //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
#if 0
            /*
             * We don't expose X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR, because newer
             * Linux kernels blindly assume that the AMD performance counters work
             * if this is set for 64 bits guests. (Can't really find a CPUID feature
             * bit for them though.)
             */
                                          | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
#endif
                                          | 0;
        }
        else
            pCPUM->aGuestCpuIdExt[7].edx = 0;
    }

    /* Cpuid 0x80000008:
     * Intel: EAX: Virtual/Physical address Size
     *        EBX, ECX, EDX - reserved
     * AMD:   EBX, EDX - reserved
     *        EAX: Virtual/Physical/Guest address Size
     *        ECX: Number of cores + APICIdCoreIdSize
     * VIA:   EAX: Virtual/Physical address Size
     *        EBX, ECX, EDX - reserved
     */
    if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
    {
        /* Only expose the virtual and physical address sizes to the guest. */
        pCPUM->aGuestCpuIdExt[8].eax &= UINT32_C(0x0000ffff);
        pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */
        /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu)
         * NC (0-7) Number of cores; 0 equals 1 core */
        pCPUM->aGuestCpuIdExt[8].ecx = 0;
#ifdef VBOX_WITH_MULTI_CORE
        if (   pVM->cCpus > 1
            && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
        {
            /* Legacy method to determine the number of cores. */
            pCPUM->aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
            pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
        }
#endif
    }

    /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
     * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
     * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
     * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
     */
    bool fNt4LeafLimit;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false); AssertRCReturn(rc, rc);
    if (fNt4LeafLimit)
        pCPUM->aGuestCpuIdStd[0].eax = 3; /** @todo r=bird: shouldn't we check if pCPUM->aGuestCpuIdStd[0].eax > 3 before setting it 3 here? */

    /*
     * Limit the number of entries and fill the remaining with the defaults.
     *
     * The limits are masking off stuff about power saving and similar; this
     * is perhaps a bit crudely done as there is probably some relatively harmless
     * info too in these leaves (like words about having a constant TSC).
     */
    if (pCPUM->aGuestCpuIdStd[0].eax > 5)
        pCPUM->aGuestCpuIdStd[0].eax = 5;
    for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++)
        pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef;

    if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008))
        pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008);
    for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000)
           ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1
           : 0;
         i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt);
         i++)
        pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef;

    /*
     * Centaur stuff (VIA).
     *
     * The important part here (we think) is to make sure the 0xc0000000
     * function returns 0xc0000001. As for the features, we don't currently
     * let on about any of those... 0xc0000002 seems to be some
     * temperature/hz/++ stuff, include it as well (static).
     */
    if (   pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000)
        && pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004))
    {
        pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002));
        pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */
        for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000);
             i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
             i++)
            pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
    }
    else
        for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
            pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;

    /*
     * Hypervisor identification.
     *
     * We only return minimal information, primarily ensuring that the
     * 0x40000000 function returns 0x40000001 and that we identify ourselves.
     * Currently we do not support any hypervisor-specific interface.
     */
    pCPUM->aGuestCpuIdHyper[0].eax = UINT32_C(0x40000001);
    pCPUM->aGuestCpuIdHyper[0].ebx = pCPUM->aGuestCpuIdHyper[0].ecx
                                   = pCPUM->aGuestCpuIdHyper[0].edx = 0x786f4256; /* 'VBox' */
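    /* 0x786f4256 is "VBox" as a little-endian dword: bytes 0x56 'V', 0x42 'B',
     * 0x6f 'o', 0x78 'x'. */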
    pCPUM->aGuestCpuIdHyper[1].eax = 0x656e6f6e; /* 'none' */
    pCPUM->aGuestCpuIdHyper[1].ebx = pCPUM->aGuestCpuIdHyper[1].ecx
                                   = pCPUM->aGuestCpuIdHyper[1].edx = 0; /* Reserved */

    /*
     * Load CPUID overrides from configuration.
     * Note: Kind of redundant now, but allows unchanged overrides
     */
    /** @cfgm{CPUM/CPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
     * Overrides the CPUID leaf values. */
    PCFGMNODE pOverrideCfg = CFGMR3GetChild(pCpumCfg, "CPUID");
    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0], RT_ELEMENTS(pCPUM->aGuestCpuIdStd), pOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0], RT_ELEMENTS(pCPUM->aGuestCpuIdExt), pOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pOverrideCfg);
    AssertRCReturn(rc, rc);

    /*
     * Check if PAE was explicitly enabled by the user.
     */
1372 bool fEnable;
1373 rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false); AssertRCReturn(rc, rc);
1374 if (fEnable)
1375 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1376
1377 /*
1378 * We don't normally enable NX for raw-mode, so give the user a chance to
1379 * force it on.
1380 */
1381 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false); AssertRCReturn(rc, rc);
1382 if (fEnable)
1383 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1384
1385 /*
1386 * We don't enable the Hypervisor Present bit by default, but it may
1387 * be needed by some guests.
1388 */
1389 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableHVP", &fEnable, false); AssertRCReturn(rc, rc);
1390 if (fEnable)
1391 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);
1392
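    /* For illustration (hypothetical VM name): expressed as extradata, the
       three knobs above look roughly like this -- note that EnablePAE hangs
       off the CFGM root while the other two live under the CPUM node:

           VBoxManage setextradata "MyVM" "VBoxInternal/EnablePAE"      1
           VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/EnableNX"  1
           VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/EnableHVP" 1
    */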
1393#undef PORTABLE_DISABLE_FEATURE_BIT
1394#undef PORTABLE_CLEAR_BITS_WHEN
1395
1396 return VINF_SUCCESS;
1397}
1398
1399
1400/**
1401 * Applies relocations to data and code managed by this
1402 * component. This function will be called at init and
1403 * whenever the VMM needs to relocate itself inside the GC.
1404 *
1405 * The CPUM will update the addresses used by the switcher.
1406 *
1407 * @param pVM The VM.
1408 */
1409VMMR3DECL(void) CPUMR3Relocate(PVM pVM)
1410{
1411 LogFlow(("CPUMR3Relocate\n"));
1412 /* nothing to do any more. */
1413}
1414
1415
1416/**
1417 * Apply late CPUM property changes based on the fHWVirtEx setting.
1418 *
1419 * @param pVM Pointer to the VM.
1420 * @param fHWVirtExEnabled Whether hardware virtualization (HWVirtEx) is enabled.
1421 */
1422VMMR3DECL(void) CPUMR3SetHWVirtEx(PVM pVM, bool fHWVirtExEnabled)
1423{
1424 /*
1425 * Workaround for missing cpuid(0) patches when leaf 4 returns GuestCpuIdDef:
1426 * If we fail to patch a cpuid(0).eax, then Linux tries to determine the number
1427 * of processors from (cpuid(4).eax >> 26) + 1.
1428 *
1429 * Note: this code is obsolete, but let's keep it here for reference.
1430 * Its purpose is valid whenever we artificially cap the max standard leaf below 4.
1431 */
1432 if (!fHWVirtExEnabled)
1433 {
1434 Assert( pVM->cpum.s.aGuestCpuIdStd[4].eax == 0
1435 || pVM->cpum.s.aGuestCpuIdStd[0].eax < 0x4);
1436 pVM->cpum.s.aGuestCpuIdStd[4].eax = 0;
1437 }
1438}
1439
1440/**
1441 * Terminates the CPUM.
1442 *
1443 * Termination means cleaning up and freeing all resources;
1444 * the VM itself is at this point powered off or suspended.
1445 *
1446 * @returns VBox status code.
1447 * @param pVM Pointer to the VM.
1448 */
1449VMMR3DECL(int) CPUMR3Term(PVM pVM)
1450{
1451#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1452 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1453 {
1454 PVMCPU pVCpu = &pVM->aCpus[i];
1455 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1456
1457 memset(pVCpu->cpum.s.aMagic, 0, sizeof(pVCpu->cpum.s.aMagic));
1458 pVCpu->cpum.s.uMagic = 0;
1459 pCtx->dr[5] = 0;
1460 }
1461#else
1462 NOREF(pVM);
1463#endif
1464 return VINF_SUCCESS;
1465}
1466
1467
1468/**
1469 * Resets a virtual CPU.
1470 *
1471 * Used by CPUMR3Reset and CPU hot plugging.
1472 *
1473 * @param pVCpu Pointer to the VMCPU.
1474 */
1475VMMR3DECL(void) CPUMR3ResetCpu(PVMCPU pVCpu)
1476{
1477 /** @todo anything different for VCPU > 0? */
1478 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1479
1480 /*
1481 * Initialize everything to ZERO first.
1482 */
1483 uint32_t fUseFlags = pVCpu->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM;
1484 memset(pCtx, 0, sizeof(*pCtx));
1485 pVCpu->cpum.s.fUseFlags = fUseFlags;
1486
1487 pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET; //0x60000010
1488 pCtx->eip = 0x0000fff0;
1489 pCtx->edx = 0x00000600; /* P6 processor */
1490 pCtx->eflags.Bits.u1Reserved0 = 1;
1491
1492 pCtx->cs.Sel = 0xf000;
1493 pCtx->cs.ValidSel = 0xf000;
1494 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1495 pCtx->cs.u64Base = UINT64_C(0xffff0000);
1496 pCtx->cs.u32Limit = 0x0000ffff;
1497 pCtx->cs.Attr.n.u1DescType = 1; /* code/data segment */
1498 pCtx->cs.Attr.n.u1Present = 1;
1499 pCtx->cs.Attr.n.u4Type = X86_SEL_TYPE_ER_ACC;
1500
1501 pCtx->ds.fFlags = CPUMSELREG_FLAGS_VALID;
1502 pCtx->ds.u32Limit = 0x0000ffff;
1503 pCtx->ds.Attr.n.u1DescType = 1; /* code/data segment */
1504 pCtx->ds.Attr.n.u1Present = 1;
1505 pCtx->ds.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
1506
1507 pCtx->es.fFlags = CPUMSELREG_FLAGS_VALID;
1508 pCtx->es.u32Limit = 0x0000ffff;
1509 pCtx->es.Attr.n.u1DescType = 1; /* code/data segment */
1510 pCtx->es.Attr.n.u1Present = 1;
1511 pCtx->es.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
1512
1513 pCtx->fs.fFlags = CPUMSELREG_FLAGS_VALID;
1514 pCtx->fs.u32Limit = 0x0000ffff;
1515 pCtx->fs.Attr.n.u1DescType = 1; /* code/data segment */
1516 pCtx->fs.Attr.n.u1Present = 1;
1517 pCtx->fs.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
1518
1519 pCtx->gs.fFlags = CPUMSELREG_FLAGS_VALID;
1520 pCtx->gs.u32Limit = 0x0000ffff;
1521 pCtx->gs.Attr.n.u1DescType = 1; /* code/data segment */
1522 pCtx->gs.Attr.n.u1Present = 1;
1523 pCtx->gs.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
1524
1525 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1526 pCtx->ss.u32Limit = 0x0000ffff;
1527 pCtx->ss.Attr.n.u1Present = 1;
1528 pCtx->ss.Attr.n.u1DescType = 1; /* code/data segment */
1529 pCtx->ss.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
1530
1531 pCtx->idtr.cbIdt = 0xffff;
1532 pCtx->gdtr.cbGdt = 0xffff;
1533
1534 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1535 pCtx->ldtr.u32Limit = 0xffff;
1536 pCtx->ldtr.Attr.n.u1Present = 1;
1537 pCtx->ldtr.Attr.n.u4Type = X86_SEL_TYPE_SYS_LDT;
1538
1539 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
1540 pCtx->tr.u32Limit = 0xffff;
1541 pCtx->tr.Attr.n.u1Present = 1;
1542 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY; /* Deduction, not properly documented by Intel. */
1543
1544 pCtx->dr[6] = X86_DR6_INIT_VAL;
1545 pCtx->dr[7] = X86_DR7_INIT_VAL;
1546
1547 pCtx->fpu.FTW = 0x00; /* All empty (abridged tag register edition). */
1548 pCtx->fpu.FCW = 0x37f;
1549
1550 /* Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3A, Table 8-1.
1551 IA-32 Processor States Following Power-up, Reset, or INIT */
1552 pCtx->fpu.MXCSR = 0x1F80;
1553 pCtx->fpu.MXCSR_MASK = 0xffff; /** @todo REM always changed this for us. Should probably check if the HW really
1554 supports all bits, since a zero value here should be read as 0xffbf. */
1555
1556 /* Init PAT MSR */
1557 pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */
1558
1559 /* EFER MBZ; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State.
1560 * The Intel docs don't mention it. */
1561 Assert(!pCtx->msrEFER);
1562
1563 /** @todo r=ramshankar: Currently broken for SMP as TMCpuTickSet() expects to be
1564 * called from each EMT while we're getting called by CPUMR3Reset()
1565 * iteratively on the same thread. Fix later. */
1566#if 0
1567 /* TSC must be 0. Intel spec. Table 9-1. "IA-32 Processor States Following Power-up, Reset, or INIT." */
1568 CPUMSetGuestMsr(pVCpu, MSR_IA32_TSC, 0);
1569#endif
1570
1571 /*
1572 * Get the APIC base MSR from the APIC device. For historical reasons (saved state), the APIC base
1573 * continues to reside in the APIC device and we cache it here in the VCPU for all further accesses.
1574 */
1575 PDMApicGetBase(pVCpu, &pCtx->msrApicBase);
1576}
1577
1578
1579/**
1580 * Resets the CPU.
1581 *
1583 * @param pVM Pointer to the VM.
1584 */
1585VMMR3DECL(void) CPUMR3Reset(PVM pVM)
1586{
1587 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1588 {
1589 CPUMR3ResetCpu(&pVM->aCpus[i]);
1590
1591#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1592 PCPUMCTX pCtx = &pVM->aCpus[i].cpum.s.Guest;
1593
1594 /* Magic marker for searching in crash dumps. */
1595 strcpy((char *)pVM->aCpus[i].cpum.s.aMagic, "CPUMCPU Magic");
1596 pVM->aCpus[i].cpum.s.uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
1597 pCtx->dr[5] = UINT64_C(0xDEADBEEFDEADBEEF);
1598#endif
1599 }
1600}
1601
1602
1603/**
1604 * Called both in pass 0 and the final pass.
1605 *
1606 * @param pVM Pointer to the VM.
1607 * @param pSSM The saved state handle.
1608 */
1609static void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM)
1610{
1611 /*
1612 * Save all the CPU ID leaves here so we can check them for compatibility
1613 * upon loading.
1614 */
1615 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd));
1616 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));
1617
1618 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt));
1619 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
1620
1621 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur));
1622 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
1623
1624 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
1625
1626 /*
1627 * Save a good portion of the raw CPU IDs as well, as they may come in
1628 * handy when validating features for raw mode.
1629 */
1630 CPUMCPUID aRawStd[16];
1631 for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
1632 ASMCpuId(i, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
1633 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
1634 SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
1635
1636 CPUMCPUID aRawExt[32];
1637 for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
1638 ASMCpuId(i | UINT32_C(0x80000000), &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
1639 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
1640 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
1641}
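/* A sketch of the resulting pass-0 data layout, as derived from the puts
 * above (counts are in elements, not bytes):
 *
 *      uint32_t cStd;     CPUMCPUID aStd[cStd];
 *      uint32_t cExt;     CPUMCPUID aExt[cExt];
 *      uint32_t cCentaur; CPUMCPUID aCentaur[cCentaur];
 *      CPUMCPUID GuestCpuIdDef;
 *      uint32_t cRawStd;  CPUMCPUID aRawStd[cRawStd];
 *      uint32_t cRawExt;  CPUMCPUID aRawExt[cRawExt];
 *
 * This is what cpumR3LoadCpuId() below unpacks again. */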
1642
1643
1644/**
1645 * Loads the CPU ID leaves saved by pass 0.
1646 *
1647 * @returns VBox status code.
1648 * @param pVM Pointer to the VM.
1649 * @param pSSM The saved state handle.
1650 * @param uVersion The format version.
1651 */
1652static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
1653{
1654 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
1655
1656 /*
1657 * Define a bunch of macros for simplifying the code.
1658 */
1659 /* Generic expression + failure message. */
1660#define CPUID_CHECK_RET(expr, fmt) \
1661 do { \
1662 if (!(expr)) \
1663 { \
1664 char *pszMsg = RTStrAPrintf2 fmt; /* lack of variadic macros sucks */ \
1665 if (fStrictCpuIdChecks) \
1666 { \
1667 int rcCpuid = SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, "%s", pszMsg); \
1668 RTStrFree(pszMsg); \
1669 return rcCpuid; \
1670 } \
1671 LogRel(("CPUM: %s\n", pszMsg)); \
1672 RTStrFree(pszMsg); \
1673 } \
1674 } while (0)
1675#define CPUID_CHECK_WRN(expr, fmt) \
1676 do { \
1677 if (!(expr)) \
1678 LogRel(fmt); \
1679 } while (0)
1680
1681 /* For comparing two values and complaining if they differ. */
1682#define CPUID_CHECK2_RET(what, host, saved) \
1683 do { \
1684 if ((host) != (saved)) \
1685 { \
1686 if (fStrictCpuIdChecks) \
1687 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
1688 N_(#what " mismatch: host=%#x saved=%#x"), (host), (saved)); \
1689 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
1690 } \
1691 } while (0)
1692#define CPUID_CHECK2_WRN(what, host, saved) \
1693 do { \
1694 if ((host) != (saved)) \
1695 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
1696 } while (0)
1697
1698 /* For checking raw cpu features (raw mode). */
1699#define CPUID_RAW_FEATURE_RET(set, reg, bit) \
1700 do { \
1701 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
1702 { \
1703 if (fStrictCpuIdChecks) \
1704 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
1705 N_(#bit " mismatch: host=%d saved=%d"), \
1706 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) ); \
1707 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
1708 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
1709 } \
1710 } while (0)
1711#define CPUID_RAW_FEATURE_WRN(set, reg, bit) \
1712 do { \
1713 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
1714 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
1715 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
1716 } while (0)
1717#define CPUID_RAW_FEATURE_IGN(set, reg, bit) do { } while (0)
1718
1719 /* For checking guest features. */
1720#define CPUID_GST_FEATURE_RET(set, reg, bit) \
1721 do { \
1722 if ( (aGuestCpuId##set [1].reg & bit) \
1723 && !(aHostRaw##set [1].reg & bit) \
1724 && !(aHostOverride##set [1].reg & bit) \
1725 && !(aGuestOverride##set [1].reg & bit) \
1726 ) \
1727 { \
1728 if (fStrictCpuIdChecks) \
1729 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
1730 N_(#bit " is not supported by the host but has already been exposed to the guest")); \
1731 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
1732 } \
1733 } while (0)
1734#define CPUID_GST_FEATURE_WRN(set, reg, bit) \
1735 do { \
1736 if ( (aGuestCpuId##set [1].reg & bit) \
1737 && !(aHostRaw##set [1].reg & bit) \
1738 && !(aHostOverride##set [1].reg & bit) \
1739 && !(aGuestOverride##set [1].reg & bit) \
1740 ) \
1741 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
1742 } while (0)
1743#define CPUID_GST_FEATURE_EMU(set, reg, bit) \
1744 do { \
1745 if ( (aGuestCpuId##set [1].reg & bit) \
1746 && !(aHostRaw##set [1].reg & bit) \
1747 && !(aHostOverride##set [1].reg & bit) \
1748 && !(aGuestOverride##set [1].reg & bit) \
1749 ) \
1750 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
1751 } while (0)
1752#define CPUID_GST_FEATURE_IGN(set, reg, bit) do { } while (0)
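/* To make the token pasting concrete: CPUID_GST_FEATURE_RET(Std, ecx,
 * X86_CPUID_FEATURE_ECX_SSE3), for instance, tests aGuestCpuIdStd[1].ecx
 * against aHostRawStd[1].ecx, aHostOverrideStd[1].ecx and
 * aGuestOverrideStd[1].ecx -- i.e. a saved guest bit only fails the load if
 * neither the real host CPU nor any override supplies it. */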
1753
1754 /* For checking guest features if AMD guest CPU. */
1755#define CPUID_GST_AMD_FEATURE_RET(set, reg, bit) \
1756 do { \
1757 if ( (aGuestCpuId##set [1].reg & bit) \
1758 && fGuestAmd \
1759 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
1760 && !(aHostOverride##set [1].reg & bit) \
1761 && !(aGuestOverride##set [1].reg & bit) \
1762 ) \
1763 { \
1764 if (fStrictCpuIdChecks) \
1765 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
1766 N_(#bit " is not supported by the host but has already been exposed to the guest")); \
1767 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
1768 } \
1769 } while (0)
1770#define CPUID_GST_AMD_FEATURE_WRN(set, reg, bit) \
1771 do { \
1772 if ( (aGuestCpuId##set [1].reg & bit) \
1773 && fGuestAmd \
1774 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
1775 && !(aHostOverride##set [1].reg & bit) \
1776 && !(aGuestOverride##set [1].reg & bit) \
1777 ) \
1778 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
1779 } while (0)
1780#define CPUID_GST_AMD_FEATURE_EMU(set, reg, bit) \
1781 do { \
1782 if ( (aGuestCpuId##set [1].reg & bit) \
1783 && fGuestAmd \
1784 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
1785 && !(aHostOverride##set [1].reg & bit) \
1786 && !(aGuestOverride##set [1].reg & bit) \
1787 ) \
1788 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
1789 } while (0)
1790#define CPUID_GST_AMD_FEATURE_IGN(set, reg, bit) do { } while (0)
1791
1792 /* For checking AMD features which have a corresponding bit in the standard
1793 range. (Intel defines very few bits in the extended feature sets.) */
1794#define CPUID_GST_FEATURE2_RET(reg, ExtBit, StdBit) \
1795 do { \
1796 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
1797 && !(fHostAmd \
1798 ? aHostRawExt[1].reg & (ExtBit) \
1799 : aHostRawStd[1].reg & (StdBit)) \
1800 && !(aHostOverrideExt[1].reg & (ExtBit)) \
1801 && !(aGuestOverrideExt[1].reg & (ExtBit)) \
1802 ) \
1803 { \
1804 if (fStrictCpuIdChecks) \
1805 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
1806 N_(#ExtBit " is not supported by the host but has already been exposed to the guest")); \
1807 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already been exposed to the guest\n")); \
1808 } \
1809 } while (0)
1810#define CPUID_GST_FEATURE2_WRN(reg, ExtBit, StdBit) \
1811 do { \
1812 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
1813 && !(fHostAmd \
1814 ? aHostRawExt[1].reg & (ExtBit) \
1815 : aHostRawStd[1].reg & (StdBit)) \
1816 && !(aHostOverrideExt[1].reg & (ExtBit)) \
1817 && !(aGuestOverrideExt[1].reg & (ExtBit)) \
1818 ) \
1819 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already been exposed to the guest\n")); \
1820 } while (0)
1821#define CPUID_GST_FEATURE2_EMU(reg, ExtBit, StdBit) \
1822 do { \
1823 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
1824 && !(fHostAmd \
1825 ? aHostRawExt[1].reg & (ExtBit) \
1826 : aHostRawStd[1].reg & (StdBit)) \
1827 && !(aHostOverrideExt[1].reg & (ExtBit)) \
1828 && !(aGuestOverrideExt[1].reg & (ExtBit)) \
1829 ) \
1830 LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
1831 } while (0)
1832#define CPUID_GST_FEATURE2_IGN(reg, ExtBit, StdBit) do { } while (0)
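/* Example: CPUID_GST_FEATURE2_RET(edx, X86_CPUID_AMD_FEATURE_EDX_MMX,
 * X86_CPUID_FEATURE_EDX_MMX) checks the guest's extended MMX bit against the
 * host's extended leaf on an AMD host, but against the host's standard
 * leaf 1 on an Intel host, precisely because Intel mirrors so few feature
 * bits into leaf 0x80000001. */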
1833
1834 /*
1835 * Load them into stack buffers first.
1836 */
1837 CPUMCPUID aGuestCpuIdStd[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)];
1838 uint32_t cGuestCpuIdStd;
1839 int rc = SSMR3GetU32(pSSM, &cGuestCpuIdStd); AssertRCReturn(rc, rc);
1840 if (cGuestCpuIdStd > RT_ELEMENTS(aGuestCpuIdStd))
1841 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1842 SSMR3GetMem(pSSM, &aGuestCpuIdStd[0], cGuestCpuIdStd * sizeof(aGuestCpuIdStd[0]));
1843
1844 CPUMCPUID aGuestCpuIdExt[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)];
1845 uint32_t cGuestCpuIdExt;
1846 rc = SSMR3GetU32(pSSM, &cGuestCpuIdExt); AssertRCReturn(rc, rc);
1847 if (cGuestCpuIdExt > RT_ELEMENTS(aGuestCpuIdExt))
1848 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1849 SSMR3GetMem(pSSM, &aGuestCpuIdExt[0], cGuestCpuIdExt * sizeof(aGuestCpuIdExt[0]));
1850
1851 CPUMCPUID aGuestCpuIdCentaur[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)];
1852 uint32_t cGuestCpuIdCentaur;
1853 rc = SSMR3GetU32(pSSM, &cGuestCpuIdCentaur); AssertRCReturn(rc, rc);
1854 if (cGuestCpuIdCentaur > RT_ELEMENTS(aGuestCpuIdCentaur))
1855 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1856 SSMR3GetMem(pSSM, &aGuestCpuIdCentaur[0], cGuestCpuIdCentaur * sizeof(aGuestCpuIdCentaur[0]));
1857
1858 CPUMCPUID GuestCpuIdDef;
1859 rc = SSMR3GetMem(pSSM, &GuestCpuIdDef, sizeof(GuestCpuIdDef));
1860 AssertRCReturn(rc, rc);
1861
1862 CPUMCPUID aRawStd[16];
1863 uint32_t cRawStd;
1864 rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc);
1865 if (cRawStd > RT_ELEMENTS(aRawStd))
1866 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1867 SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
1868
1869 CPUMCPUID aRawExt[32];
1870 uint32_t cRawExt;
1871 rc = SSMR3GetU32(pSSM, &cRawExt); AssertRCReturn(rc, rc);
1872 if (cRawExt > RT_ELEMENTS(aRawExt))
1873 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1874 rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
1875 AssertRCReturn(rc, rc);
1876
1877 /*
1878 * Note that we support restoring fewer than the current number of standard
1879 * leaves because we've been allowed more in newer versions of VBox.
1880 *
1881 * So, pad the new entries with the default.
1882 */
1883 for (uint32_t i = cGuestCpuIdStd; i < RT_ELEMENTS(aGuestCpuIdStd); i++)
1884 aGuestCpuIdStd[i] = GuestCpuIdDef;
1885
1886 for (uint32_t i = cGuestCpuIdExt; i < RT_ELEMENTS(aGuestCpuIdExt); i++)
1887 aGuestCpuIdExt[i] = GuestCpuIdDef;
1888
1889 for (uint32_t i = cGuestCpuIdCentaur; i < RT_ELEMENTS(aGuestCpuIdCentaur); i++)
1890 aGuestCpuIdCentaur[i] = GuestCpuIdDef;
1891
1892 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
1893 ASMCpuId(i, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
1894
1895 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
1896 ASMCpuId(i | UINT32_C(0x80000000), &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
1897
1898 /*
1899 * Get the raw CPU IDs for the current host.
1900 */
1901 CPUMCPUID aHostRawStd[16];
1902 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
1903 ASMCpuId(i, &aHostRawStd[i].eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx);
1904
1905 CPUMCPUID aHostRawExt[32];
1906 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
1907 ASMCpuId(i | UINT32_C(0x80000000), &aHostRawExt[i].eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx);
1908
1909 /*
1910 * Get the host and guest overrides so we don't reject the state because
1911 * some feature was enabled through these interfaces.
1912 * Note! We currently only need the feature leaves, so skip the rest.
1913 */
1914 PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/CPUID");
1915 CPUMCPUID aGuestOverrideStd[2];
1916 memcpy(&aGuestOverrideStd[0], &aHostRawStd[0], sizeof(aGuestOverrideStd));
1917 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aGuestOverrideStd[0], RT_ELEMENTS(aGuestOverrideStd), pOverrideCfg);
1918
1919 CPUMCPUID aGuestOverrideExt[2];
1920 memcpy(&aGuestOverrideExt[0], &aHostRawExt[0], sizeof(aGuestOverrideExt));
1921 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aGuestOverrideExt[0], RT_ELEMENTS(aGuestOverrideExt), pOverrideCfg);
1922
1923 pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
1924 CPUMCPUID aHostOverrideStd[2];
1925 memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
1926 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aHostOverrideStd[0], RT_ELEMENTS(aHostOverrideStd), pOverrideCfg);
1927
1928 CPUMCPUID aHostOverrideExt[2];
1929 memcpy(&aHostOverrideExt[0], &aHostRawExt[0], sizeof(aHostOverrideExt));
1930 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aHostOverrideExt[0], RT_ELEMENTS(aHostOverrideExt), pOverrideCfg);
1931
1932 /*
1933 * This step is optional: strict CPUID checking can be turned off via CFGM.
1934 */
1935 bool fStrictCpuIdChecks;
1936 CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"), "StrictCpuIdChecks", &fStrictCpuIdChecks, true);
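    /* For illustration (hypothetical VM name): a user knowingly moving a
       saved state between dissimilar hosts can demote the _RET checks below
       to log warnings with:

           VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/StrictCpuIdChecks" 0
    */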
1937
1938
1939
1940 /*
1941 * For raw-mode we'll require that the CPUs are very similar since we don't
1942 * intercept CPUID instructions for user mode applications.
1943 */
1944 if (!HMIsEnabled(pVM))
1945 {
1946 /* CPUID(0) */
1947 CPUID_CHECK_RET( aHostRawStd[0].ebx == aRawStd[0].ebx
1948 && aHostRawStd[0].ecx == aRawStd[0].ecx
1949 && aHostRawStd[0].edx == aRawStd[0].edx,
1950 (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
1951 &aHostRawStd[0].ebx, &aHostRawStd[0].edx, &aHostRawStd[0].ecx,
1952 &aRawStd[0].ebx, &aRawStd[0].edx, &aRawStd[0].ecx));
1953 CPUID_CHECK2_WRN("Std CPUID max leaf", aHostRawStd[0].eax, aRawStd[0].eax);
1954 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3);
1955 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1].eax >> 28, aRawExt[1].eax >> 28);
1956
1957 bool const fIntel = ASMIsIntelCpuEx(aRawStd[0].ebx, aRawStd[0].ecx, aRawStd[0].edx);
1958
1959 /* CPUID(1).eax */
1960 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawStd[1].eax), ASMGetCpuFamily(aRawStd[1].eax));
1961 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawStd[1].eax, fIntel), ASMGetCpuModel(aRawStd[1].eax, fIntel));
1962 CPUID_CHECK2_WRN("CPU type", (aHostRawStd[1].eax >> 12) & 3, (aRawStd[1].eax >> 12) & 3 );
1963
1964 /* CPUID(1).ebx - completely ignore CPU count and APIC ID. */
1965 CPUID_CHECK2_RET("CPU brand ID", aHostRawStd[1].ebx & 0xff, aRawStd[1].ebx & 0xff);
1966 CPUID_CHECK2_WRN("CLFLUSH chunk count", (aHostRawStd[1].ebx >> 8) & 0xff, (aRawStd[1].ebx >> 8) & 0xff);
1967
1968 /* CPUID(1).ecx */
1969 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);
1970 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL);
1971 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64);
1972 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
1973 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS);
1974 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_VMX);
1975 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_SMX);
1976 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_EST);
1977 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TM2);
1978 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3);
1979 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID);
1980 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
1981 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA);
1982 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16);
1983 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);
1984 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM);
1985 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
1986 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(17) /*reserved*/);
1987 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DCA);
1988 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1);
1989 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2);
1990 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
1991 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE);
1992 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT);
1993 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(24) /*reserved*/);
1994 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES);
1995 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE);
1996 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE);
1997 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX);
1998 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(29) /*reserved*/);
1999 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(30) /*reserved*/);
2000 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_HVP);
2001
2002 /* CPUID(1).edx */
2003 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
2004 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
2005 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE);
2006 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
2007 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC);
2008 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR);
2009 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
2010 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
2011 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8);
2012 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
2013 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
2014 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
2015 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
2016 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
2017 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
2018 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV);
2019 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
2020 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
2021 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
2022 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH);
2023 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
2024 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_DS);
2025 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_ACPI);
2026 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX);
2027 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR);
2028 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE);
2029 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2);
2030 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SS);
2031 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_HTT);
2032 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_TM);
2033 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/);
2034 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PBE);
2035
2036 /* CPUID(2) - config, mostly about caches. ignore. */
2037 /* CPUID(3) - processor serial number. ignore. */
2038 /* CPUID(4) - config, cache and topology - takes ECX as input. ignore. */
2039 /* CPUID(5) - mwait/monitor config. ignore. */
2040 /* CPUID(6) - power management. ignore. */
2041 /* CPUID(7) - ???. ignore. */
2042 /* CPUID(8) - ???. ignore. */
2043 /* CPUID(9) - DCA. ignore for now. */
2044 /* CPUID(a) - PeMo info. ignore for now. */
2045 /* CPUID(b) - topology info - takes ECX as input. ignore. */
2046
2047 /* CPUID(d) - XCR0 stuff - takes ECX as input. We only warn about the main level (ECX=0) for now. */
2048 CPUID_CHECK_WRN( aRawStd[0].eax < UINT32_C(0x0000000d)
2049 || aHostRawStd[0].eax >= UINT32_C(0x0000000d),
2050 ("CPUM: Standard leaf D was present on saved state host, not present on current.\n"));
2051 if ( aRawStd[0].eax >= UINT32_C(0x0000000d)
2052 && aHostRawStd[0].eax >= UINT32_C(0x0000000d))
2053 {
2054 CPUID_CHECK2_WRN("Valid low XCR0 bits", aHostRawStd[0xd].eax, aRawStd[0xd].eax);
2055 CPUID_CHECK2_WRN("Valid high XCR0 bits", aHostRawStd[0xd].edx, aRawStd[0xd].edx);
2056 CPUID_CHECK2_WRN("Current XSAVE/XRSTOR area size", aHostRawStd[0xd].ebx, aRawStd[0xd].ebx);
2057 CPUID_CHECK2_WRN("Max XSAVE/XRSTOR area size", aHostRawStd[0xd].ecx, aRawStd[0xd].ecx);
2058 }
2059
2060 /* CPUID(0x80000000) - same as CPUID(0) except for eax.
2061 Note! Intel has been marking many of the fields here as reserved. We
2062 will verify them as if it were an AMD CPU. */
2063 CPUID_CHECK_RET( (aHostRawExt[0].eax >= UINT32_C(0x80000001) && aHostRawExt[0].eax <= UINT32_C(0x8000007f))
2064 || !(aRawExt[0].eax >= UINT32_C(0x80000001) && aRawExt[0].eax <= UINT32_C(0x8000007f)),
2065 (N_("Extended leaves were present on the saved state host, but are missing on the current\n")));
2066 if (aRawExt[0].eax >= UINT32_C(0x80000001) && aRawExt[0].eax <= UINT32_C(0x8000007f))
2067 {
2068 CPUID_CHECK_RET( aHostRawExt[0].ebx == aRawExt[0].ebx
2069 && aHostRawExt[0].ecx == aRawExt[0].ecx
2070 && aHostRawExt[0].edx == aRawExt[0].edx,
2071 (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
2072 &aHostRawExt[0].ebx, &aHostRawExt[0].edx, &aHostRawExt[0].ecx,
2073 &aRawExt[0].ebx, &aRawExt[0].edx, &aRawExt[0].ecx));
2074 CPUID_CHECK2_WRN("Ext CPUID max leaf", aHostRawExt[0].eax, aRawExt[0].eax);
2075
2076 /* CPUID(0x80000001).eax - same as CPUID(0).eax. */
2077 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawExt[1].eax), ASMGetCpuFamily(aRawExt[1].eax));
2078 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawExt[1].eax, fIntel), ASMGetCpuModel(aRawExt[1].eax, fIntel));
2079 CPUID_CHECK2_WRN("CPU type", (aHostRawExt[1].eax >> 12) & 3, (aRawExt[1].eax >> 12) & 3 );
2080 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3 );
2081 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1].eax >> 28, aRawExt[1].eax >> 28);
2082
2083 /* CPUID(0x80000001).ebx - Brand ID (maybe), just warn if things differs. */
2084 CPUID_CHECK2_WRN("CPU BrandID", aHostRawExt[1].ebx & 0xffff, aRawExt[1].ebx & 0xffff);
2085 CPUID_CHECK2_WRN("Reserved bits 16:27", (aHostRawExt[1].ebx >> 16) & 0xfff, (aRawExt[1].ebx >> 16) & 0xfff);
2086 CPUID_CHECK2_WRN("PkgType", (aHostRawExt[1].ebx >> 28) & 0xf, (aRawExt[1].ebx >> 28) & 0xf);
2087
2088 /* CPUID(0x80000001).ecx */
2089 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
2090 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);
2091 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);
2092 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);
2093 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L);
2094 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM);
2095 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);
2096 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);
2097 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);
2098 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW);
2099 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS);
2100 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5);
2101 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);
2102 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT);
2103 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
2104 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
2105 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
2106 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
2107 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
2108 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
2109 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
2110 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
2111 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
2112 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
2113 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
2114 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
2115 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
2116 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
2117 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
2118 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
2119 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
2120 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));
2121
2122 /* CPUID(0x80000001).edx */
2123 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FPU);
2124 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_VME);
2125 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_DE);
2126 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE);
2127 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_TSC);
2128 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MSR);
2129 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAE);
2130 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCE);
2131 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CX8);
2132 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_APIC);
2133 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(10) /*reserved*/);
2134 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SEP);
2135 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MTRR);
2136 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PGE);
2137 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCA);
2138 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CMOV);
2139 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAT);
2140 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE36);
2141 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(18) /*reserved*/);
2142 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(19) /*reserved*/);
2143 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
2144 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(21) /*reserved*/);
2145 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
2146 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MMX);
2147 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FXSR);
2148 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
2149 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
2150 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
2151 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(28) /*reserved*/);
2152 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
2153 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
2154 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
2155
2156 /** @todo verify the rest as well. */
2157 }
2158 }
2159
2160
2161
2162 /*
2163 * Verify that we can support the features already exposed to the guest on
2164 * this host.
2165 *
2166 * Most of the features we're emulating require intercepting instructions
2167 * and doing it the slow way, so there is no need to warn when they aren't
2168 * present in the host CPU. Thus we use IGN instead of EMU on these.
2169 *
2170 * Trailing comments:
2171 * "EMU" - Possible to emulate, could be lots of work and very slow.
2172 * "EMU?" - Can this be emulated?
2173 */
2174 /* CPUID(1).ecx */
2175 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU
2176 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL); // -> EMU?
2177 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64); // -> EMU?
2178 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
2179 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS); // -> EMU?
2180 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_VMX); // -> EMU
2181 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SMX); // -> EMU
2182 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_EST); // -> EMU
2183 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TM2); // -> EMU?
2184 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3); // -> EMU
2185 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID); // -> EMU
2186 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
2187 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA); // -> EMU? what's this?
2188 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16); // -> EMU?
2189 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
2190 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM); // -> EMU
2191 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
2192 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(17) /*reserved*/);
2193 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DCA); // -> EMU?
2194 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1); // -> EMU
2195 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2); // -> EMU
2196 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
2197 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE); // -> EMU
2198 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT); // -> EMU
2199 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(24) /*reserved*/);
2200 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES); // -> EMU
2201 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE); // -> EMU
2202 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU
2203 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX); // -> EMU?
2204 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(29) /*reserved*/);
2205 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(30) /*reserved*/);
2206 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_HVP); // Normally not set by host
2207
2208 /* CPUID(1).edx */
2209 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
2210 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
2211 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE); // -> EMU?
2212 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
2213 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
2214 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
2215 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
2216 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
2217 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
2218 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
2219 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
2220 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
2221 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
2222 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
2223 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
2224 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
2225 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
2226 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
2227 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
2228 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH); // -> EMU
2229 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
2230 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DS); // -> EMU?
2231 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_ACPI); // -> EMU?
2232 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
2233 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
2234 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE); // -> EMU
2235 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2); // -> EMU
2236 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SS); // -> EMU?
2237 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_HTT); // -> EMU?
2238 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TM); // -> EMU?
2239 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/); // -> EMU
2240 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PBE); // -> EMU?
2241
2242 /* CPUID(0x80000000). */
2243 if ( aGuestCpuIdExt[0].eax >= UINT32_C(0x80000001)
2244 && aGuestCpuIdExt[0].eax < UINT32_C(0x8000007f))
2245 {
2246 /** @todo deal with no 0x80000001 on the host. */
2247 bool const fHostAmd = ASMIsAmdCpuEx(aHostRawStd[0].ebx, aHostRawStd[0].ecx, aHostRawStd[0].edx);
2248 bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].ebx, aGuestCpuIdExt[0].ecx, aGuestCpuIdExt[0].edx);
2249
2250 /* CPUID(0x80000001).ecx */
2251 CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); // -> EMU
2252 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL); // -> EMU
2253 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM); // -> EMU
2254 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???
2255 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L); // -> EMU
2256 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM); // -> EMU
2257 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); // -> EMU
2258 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
2259 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU
2260 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW); // -> EMU?
2261 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS); // -> EMU
2262 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5); // -> EMU
2263 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); // -> EMU
2264 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT); // -> EMU
2265 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
2266 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
2267 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
2268 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
2269 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
2270 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
2271 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
2272 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
2273 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
2274 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
2275 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
2276 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
2277 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
2278 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
2279 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
2280 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
2281 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
2282 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));
2283
2284 /* CPUID(0x80000001).edx */
2285 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_FPU, X86_CPUID_FEATURE_EDX_FPU); // -> EMU
2286 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_VME, X86_CPUID_FEATURE_EDX_VME); // -> EMU
2287 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_DE, X86_CPUID_FEATURE_EDX_DE); // -> EMU
2288 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PSE, X86_CPUID_FEATURE_EDX_PSE);
2289 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_TSC, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
2290 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_MSR, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
2291 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_PAE, X86_CPUID_FEATURE_EDX_PAE);
2292 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MCE, X86_CPUID_FEATURE_EDX_MCE);
2293 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_CX8, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
2294 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_APIC, X86_CPUID_FEATURE_EDX_APIC);
2295 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(10) /*reserved*/);
2296 CPUID_GST_FEATURE_IGN( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL); // On Intel: long mode only.
2297 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MTRR, X86_CPUID_FEATURE_EDX_MTRR);
2298 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PGE, X86_CPUID_FEATURE_EDX_PGE);
2299 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MCA, X86_CPUID_FEATURE_EDX_MCA);
2300 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_CMOV, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
2301 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PAT, X86_CPUID_FEATURE_EDX_PAT);
2302 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
2303 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(18) /*reserved*/);
2304 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(19) /*reserved*/);
2305 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
2306 CPUID_GST_FEATURE_WRN( Ext, edx, RT_BIT_32(21) /*reserved*/);
2307 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
2308 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_MMX, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
2309 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_FXSR, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
2310 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
2311 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
2312 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
2313 CPUID_GST_FEATURE_IGN( Ext, edx, RT_BIT_32(28) /*reserved*/);
2314 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
2315 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
2316 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
2317 }
2318
2319 /*
2320 * We're good, commit the CPU ID leaves.
2321 */
2322 memcpy(&pVM->cpum.s.aGuestCpuIdStd[0], &aGuestCpuIdStd[0], sizeof(aGuestCpuIdStd));
2323 memcpy(&pVM->cpum.s.aGuestCpuIdExt[0], &aGuestCpuIdExt[0], sizeof(aGuestCpuIdExt));
2324 memcpy(&pVM->cpum.s.aGuestCpuIdCentaur[0], &aGuestCpuIdCentaur[0], sizeof(aGuestCpuIdCentaur));
2325 pVM->cpum.s.GuestCpuIdDef = GuestCpuIdDef;
2326
2327#undef CPUID_CHECK_RET
2328#undef CPUID_CHECK_WRN
2329#undef CPUID_CHECK2_RET
2330#undef CPUID_CHECK2_WRN
2331#undef CPUID_RAW_FEATURE_RET
2332#undef CPUID_RAW_FEATURE_WRN
2333#undef CPUID_RAW_FEATURE_IGN
2334#undef CPUID_GST_FEATURE_RET
2335#undef CPUID_GST_FEATURE_WRN
2336#undef CPUID_GST_FEATURE_EMU
2337#undef CPUID_GST_FEATURE_IGN
2338#undef CPUID_GST_FEATURE2_RET
2339#undef CPUID_GST_FEATURE2_WRN
2340#undef CPUID_GST_FEATURE2_EMU
2341#undef CPUID_GST_FEATURE2_IGN
2342#undef CPUID_GST_AMD_FEATURE_RET
2343#undef CPUID_GST_AMD_FEATURE_WRN
2344#undef CPUID_GST_AMD_FEATURE_EMU
2345#undef CPUID_GST_AMD_FEATURE_IGN
2346
2347 return VINF_SUCCESS;
2348}
2349
2350
2351/**
2352 * Pass 0 live exec callback.
2353 *
2354 * @returns VINF_SSM_DONT_CALL_AGAIN.
2355 * @param pVM Pointer to the VM.
2356 * @param pSSM The saved state handle.
2357 * @param uPass The pass (0).
2358 */
2359static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
2360{
2361 AssertReturn(uPass == 0, VERR_SSM_UNEXPECTED_PASS);
2362 cpumR3SaveCpuId(pVM, pSSM);
2363 return VINF_SSM_DONT_CALL_AGAIN;
2364}
2365
2366
2367/**
2368 * Execute state save operation.
2369 *
2370 * @returns VBox status code.
2371 * @param pVM Pointer to the VM.
2372 * @param pSSM SSM operation handle.
2373 */
2374static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
2375{
2376 /*
2377 * Save.
2378 */
2379 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2380 {
2381 PVMCPU pVCpu = &pVM->aCpus[i];
2382 SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), 0, g_aCpumCtxFields, NULL);
2383 }
2384
2385 SSMR3PutU32(pSSM, pVM->cCpus);
2386 SSMR3PutU32(pSSM, sizeof(pVM->aCpus[0].cpum.s.GuestMsrs.msr));
2387 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
2388 {
2389 PVMCPU pVCpu = &pVM->aCpus[iCpu];
2390
2391 SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), 0, g_aCpumCtxFields, NULL);
2392 SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags);
2393 SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
2394 AssertCompileSizeAlignment(pVCpu->cpum.s.GuestMsrs.msr, sizeof(uint64_t));
2395 SSMR3PutMem(pSSM, &pVCpu->cpum.s.GuestMsrs, sizeof(pVCpu->cpum.s.GuestMsrs.msr));
2396 }
2397
2398 cpumR3SaveCpuId(pVM, pSSM);
2399 return VINF_SUCCESS;
2400}
2401
2402
2403/**
2404 * @copydoc FNSSMINTLOADPREP
2405 */
2406static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
2407{
2408 NOREF(pSSM);
2409 pVM->cpum.s.fPendingRestore = true;
2410 return VINF_SUCCESS;
2411}
2412
2413
2414/**
2415 * @copydoc FNSSMINTLOADEXEC
2416 */
2417static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2418{
2419 /*
2420 * Validate version.
2421 */
2422 if ( uVersion != CPUM_SAVED_STATE_VERSION
2423 && uVersion != CPUM_SAVED_STATE_VERSION_MEM
2424 && uVersion != CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE
2425 && uVersion != CPUM_SAVED_STATE_VERSION_VER3_2
2426 && uVersion != CPUM_SAVED_STATE_VERSION_VER3_0
2427 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR
2428 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_0
2429 && uVersion != CPUM_SAVED_STATE_VERSION_VER1_6)
2430 {
2431 AssertMsgFailed(("cpumR3LoadExec: Invalid version uVersion=%d!\n", uVersion));
2432 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
2433 }
2434
2435 if (uPass == SSM_PASS_FINAL)
2436 {
2437 /*
2438 * Set the size of RTGCPTR for SSMR3GetGCPtr. (Only necessary for
2439 * really old SSM file versions.)
2440 */
2441 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
2442 SSMR3HandleSetGCPtrSize(pSSM, sizeof(RTGCPTR32));
2443 else if (uVersion <= CPUM_SAVED_STATE_VERSION_VER3_0)
2444 SSMR3HandleSetGCPtrSize(pSSM, HC_ARCH_BITS == 32 ? sizeof(RTGCPTR32) : sizeof(RTGCPTR));
2445
2446 uint32_t const fLoad = uVersion > CPUM_SAVED_STATE_VERSION_MEM ? 0 : SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED;
2447 PCSSMFIELD paCpumCtxFields = g_aCpumCtxFields;
2448 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
2449 paCpumCtxFields = g_aCpumCtxFieldsV16;
2450 else if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
2451 paCpumCtxFields = g_aCpumCtxFieldsMem;
2452
2453 /*
2454 * Restore.
2455 */
2456 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
2457 {
2458 PVMCPU pVCpu = &pVM->aCpus[iCpu];
2459 uint64_t uCR3 = pVCpu->cpum.s.Hyper.cr3;
2460 uint64_t uRSP = pVCpu->cpum.s.Hyper.rsp; /* see VMMR3Relocate(). */
2461 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), fLoad, paCpumCtxFields, NULL);
2462 pVCpu->cpum.s.Hyper.cr3 = uCR3;
2463 pVCpu->cpum.s.Hyper.rsp = uRSP;
2464 }
2465
2466 if (uVersion >= CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR)
2467 {
2468 uint32_t cCpus;
2469 int rc = SSMR3GetU32(pSSM, &cCpus); AssertRCReturn(rc, rc);
2470 AssertLogRelMsgReturn(cCpus == pVM->cCpus, ("Mismatching CPU counts: saved: %u; configured: %u\n", cCpus, pVM->cCpus),
2471 VERR_SSM_UNEXPECTED_DATA);
2472 }
2473 AssertLogRelMsgReturn( uVersion > CPUM_SAVED_STATE_VERSION_VER2_0
2474 || pVM->cCpus == 1,
2475 ("cCpus=%u\n", pVM->cCpus),
2476 VERR_SSM_UNEXPECTED_DATA);
2477
2478 uint32_t cbMsrs = 0;
2479 if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
2480 {
2481 int rc = SSMR3GetU32(pSSM, &cbMsrs); AssertRCReturn(rc, rc);
2482 AssertLogRelMsgReturn(RT_ALIGN(cbMsrs, sizeof(uint64_t)) == cbMsrs, ("Size of MSRs is misaligned: %#x\n", cbMsrs),
2483 VERR_SSM_UNEXPECTED_DATA);
2484 AssertLogRelMsgReturn(cbMsrs <= sizeof(CPUMCTXMSRS) && cbMsrs > 0, ("Size of MSRs is out of range: %#x\n", cbMsrs),
2485 VERR_SSM_UNEXPECTED_DATA);
2486 }
2487
2488 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
2489 {
2490 PVMCPU pVCpu = &pVM->aCpus[iCpu];
2491 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), fLoad,
2492 paCpumCtxFields, NULL);
2493 SSMR3GetU32(pSSM, &pVCpu->cpum.s.fUseFlags);
2494 SSMR3GetU32(pSSM, &pVCpu->cpum.s.fChanged);
2495 if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
2496 SSMR3GetMem(pSSM, &pVCpu->cpum.s.GuestMsrs.au64[0], cbMsrs);
2497 else if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_0)
2498 {
2499 SSMR3GetMem(pSSM, &pVCpu->cpum.s.GuestMsrs.au64[0], 2 * sizeof(uint64_t)); /* Restore two MSRs. */
2500 SSMR3Skip(pSSM, 62 * sizeof(uint64_t));
2501 }
2502 }
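        /* (In the 3.0-era layout handled just above, the MSR area was thus a
           fixed 64 uint64_t entries -- the 2 restored plus the 62 skipped --
           i.e. 512 bytes per VCPU.) */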
2503
2504 /* Older states do not have the internal selector register flags
2505 and valid selector values. Supply those. */
2506 if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
2507 {
2508 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
2509 {
2510 PVMCPU pVCpu = &pVM->aCpus[iCpu];
2511 bool const fValid = HMIsEnabled(pVM)
2512 || ( uVersion > CPUM_SAVED_STATE_VERSION_VER3_2
2513 && !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID));
2514 PCPUMSELREG paSelReg = CPUMCTX_FIRST_SREG(&pVCpu->cpum.s.Guest);
2515 if (fValid)
2516 {
2517 for (uint32_t iSelReg = 0; iSelReg < X86_SREG_COUNT; iSelReg++)
2518 {
2519 paSelReg[iSelReg].fFlags = CPUMSELREG_FLAGS_VALID;
2520 paSelReg[iSelReg].ValidSel = paSelReg[iSelReg].Sel;
2521 }
2522
2523 pVCpu->cpum.s.Guest.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2524 pVCpu->cpum.s.Guest.ldtr.ValidSel = pVCpu->cpum.s.Guest.ldtr.Sel;
2525 }
2526 else
2527 {
2528 for (uint32_t iSelReg = 0; iSelReg < X86_SREG_COUNT; iSelReg++)
2529 {
2530 paSelReg[iSelReg].fFlags = 0;
2531 paSelReg[iSelReg].ValidSel = 0;
2532 }
2533
2534 /* This might not be 104% correct, but I think it's close
2535 enough for all practical purposes... (REM always loaded
2536 LDTR registers.) */
2537 pVCpu->cpum.s.Guest.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2538 pVCpu->cpum.s.Guest.ldtr.ValidSel = pVCpu->cpum.s.Guest.ldtr.Sel;
2539 }
2540 pVCpu->cpum.s.Guest.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2541 pVCpu->cpum.s.Guest.tr.ValidSel = pVCpu->cpum.s.Guest.tr.Sel;
2542 }
2543 }
2544
2545 /* Clear CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID. */
2546 if ( uVersion > CPUM_SAVED_STATE_VERSION_VER3_2
2547 && uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
2548 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
2549 pVM->aCpus[iCpu].cpum.s.fChanged &= ~CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;
2550
2551 /*
2552 * A quick sanity check.
2553 */
2554 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
2555 {
2556 PVMCPU pVCpu = &pVM->aCpus[iCpu];
2557 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.es.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
2558 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.cs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
2559 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.ss.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
2560 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.ds.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
2561 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.fs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
2562 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.gs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
2563 }
2564 }
2565
2566 pVM->cpum.s.fPendingRestore = false;
2567
2568 /*
2569 * Guest CPUIDs.
2570 */
2571 if (uVersion > CPUM_SAVED_STATE_VERSION_VER3_0)
2572 return cpumR3LoadCpuId(pVM, pSSM, uVersion);
2573
2574 /** @todo Merge the code below into cpumR3LoadCpuId when we've found out what is
2575 * actually required. */
2576
2577 /*
2578 * Restore the CPUID leaves.
2579 *
2580 * Note that we support restoring fewer than the current number of standard
2581 * leaves because we've been allowed more in newer versions of VBox.
2582 */
2583 uint32_t cElements;
2584 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
2585 if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
2586 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2587 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdStd[0]));
2588
2589 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
2590 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
2591 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2592 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
2593
2594 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
2595 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
2596 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2597 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
2598
2599 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
2600
2601 /*
2602 * Check that the basic cpuid id information is unchanged.
2603 */
2604 /** @todo we should check the 64 bits capabilities too! */
2605 uint32_t au32CpuId[8] = {0,0,0,0, 0,0,0,0};
2606 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
2607 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
2608 uint32_t au32CpuIdSaved[8];
2609 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
2610 if (RT_SUCCESS(rc))
2611 {
2612 /* Ignore CPU stepping. */
2613 au32CpuId[4] &= 0xfffffff0;
2614 au32CpuIdSaved[4] &= 0xfffffff0;
2615
2616 /* Ignore APIC ID (AMD specs). */
2617 au32CpuId[5] &= ~0xff000000;
2618 au32CpuIdSaved[5] &= ~0xff000000;
2619
2620 /* Ignore the number of Logical CPUs (AMD specs). */
2621 au32CpuId[5] &= ~0x00ff0000;
2622 au32CpuIdSaved[5] &= ~0x00ff0000;
2623
2624 /* Ignore some advanced capability bits, that we don't expose to the guest. */
2625 au32CpuId[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
2626 | X86_CPUID_FEATURE_ECX_VMX
2627 | X86_CPUID_FEATURE_ECX_SMX
2628 | X86_CPUID_FEATURE_ECX_EST
2629 | X86_CPUID_FEATURE_ECX_TM2
2630 | X86_CPUID_FEATURE_ECX_CNTXID
2631 | X86_CPUID_FEATURE_ECX_TPRUPDATE
2632 | X86_CPUID_FEATURE_ECX_PDCM
2633 | X86_CPUID_FEATURE_ECX_DCA
2634 | X86_CPUID_FEATURE_ECX_X2APIC
2635 );
2636 au32CpuIdSaved[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
2637 | X86_CPUID_FEATURE_ECX_VMX
2638 | X86_CPUID_FEATURE_ECX_SMX
2639 | X86_CPUID_FEATURE_ECX_EST
2640 | X86_CPUID_FEATURE_ECX_TM2
2641 | X86_CPUID_FEATURE_ECX_CNTXID
2642 | X86_CPUID_FEATURE_ECX_TPRUPDATE
2643 | X86_CPUID_FEATURE_ECX_PDCM
2644 | X86_CPUID_FEATURE_ECX_DCA
2645 | X86_CPUID_FEATURE_ECX_X2APIC
2646 );
2647
2648 /* Make sure we don't forget to update the masks when enabling
2649 * features in the future.
2650 */
2651 AssertRelease(!(pVM->cpum.s.aGuestCpuIdStd[1].ecx &
2652 ( X86_CPUID_FEATURE_ECX_DTES64
2653 | X86_CPUID_FEATURE_ECX_VMX
2654 | X86_CPUID_FEATURE_ECX_SMX
2655 | X86_CPUID_FEATURE_ECX_EST
2656 | X86_CPUID_FEATURE_ECX_TM2
2657 | X86_CPUID_FEATURE_ECX_CNTXID
2658 | X86_CPUID_FEATURE_ECX_TPRUPDATE
2659 | X86_CPUID_FEATURE_ECX_PDCM
2660 | X86_CPUID_FEATURE_ECX_DCA
2661 | X86_CPUID_FEATURE_ECX_X2APIC
2662 )));
2663 /* do the compare */
2664 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
2665 {
2666 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
2667 LogRel(("cpumR3LoadExec: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
2668 "Saved=%.*Rhxs\n"
2669 "Real =%.*Rhxs\n",
2670 sizeof(au32CpuIdSaved), au32CpuIdSaved,
2671 sizeof(au32CpuId), au32CpuId));
2672 else
2673 {
2674 LogRel(("cpumR3LoadExec: CpuId mismatch!\n"
2675 "Saved=%.*Rhxs\n"
2676 "Real =%.*Rhxs\n",
2677 sizeof(au32CpuIdSaved), au32CpuIdSaved,
2678 sizeof(au32CpuId), au32CpuId));
2679 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
2680 }
2681 }
2682 }
2683
2684 return rc;
2685}
2686
2687
2688/**
2689 * @copydoc FNSSMINTLOADDONE
2690 */
2691static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
2692{
2693 if (RT_FAILURE(SSMR3HandleGetStatus(pSSM)))
2694 return VINF_SUCCESS;
2695
2696 /* just check this since we can. */ /** @todo Add a SSM unit flag for indicating that it's mandatory during a restore. */
2697 if (pVM->cpum.s.fPendingRestore)
2698 {
2699 LogRel(("CPUM: Missing state!\n"));
2700 return VERR_INTERNAL_ERROR_2;
2701 }
2702
2703 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
2704 {
2705 /* Notify PGM of the NXE states in case they've changed. */
2706 PGMNotifyNxeChanged(&pVM->aCpus[iCpu], !!(pVM->aCpus[iCpu].cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
2707
2708 /* Cache the local APIC base from the APIC device. During init. this is done in CPUMR3ResetCpu(). */
2709 PDMApicGetBase(&pVM->aCpus[iCpu], &pVM->aCpus[iCpu].cpum.s.Guest.msrApicBase);
2710 }
2711 return VINF_SUCCESS;
2712}
2713
2714
2715/**
2716 * Checks if the CPUM state restore is still pending.
2717 *
2718 * @returns true / false.
2719 * @param pVM Pointer to the VM.
2720 */
2721VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM)
2722{
2723 return pVM->cpum.s.fPendingRestore;
2724}
2725
2726
2727/**
2728 * Formats the EFLAGS value into mnemonics.
2729 *
2730 * @param pszEFlags Where to write the mnemonics. (Assumes sufficient buffer space.)
2731 * @param efl The EFLAGS value.
2732 */
2733static void cpumR3InfoFormatFlags(char *pszEFlags, uint32_t efl)
2734{
2735 /*
2736 * Format the flags.
2737 */
2738 static const struct
2739 {
2740 const char *pszSet; const char *pszClear; uint32_t fFlag;
2741 } s_aFlags[] =
2742 {
2743 { "vip",NULL, X86_EFL_VIP },
2744 { "vif",NULL, X86_EFL_VIF },
2745 { "ac", NULL, X86_EFL_AC },
2746 { "vm", NULL, X86_EFL_VM },
2747 { "rf", NULL, X86_EFL_RF },
2748 { "nt", NULL, X86_EFL_NT },
2749 { "ov", "nv", X86_EFL_OF },
2750 { "dn", "up", X86_EFL_DF },
2751 { "ei", "di", X86_EFL_IF },
2752 { "tf", NULL, X86_EFL_TF },
2753 { "nt", "pl", X86_EFL_SF },
2754 { "nz", "zr", X86_EFL_ZF },
2755 { "ac", "na", X86_EFL_AF },
2756 { "po", "pe", X86_EFL_PF },
2757 { "cy", "nc", X86_EFL_CF },
2758 };
2759 char *psz = pszEFlags;
2760 for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
2761 {
2762 const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
2763 if (pszAdd)
2764 {
2765 strcpy(psz, pszAdd);
2766 psz += strlen(pszAdd);
2767 *psz++ = ' ';
2768 }
2769 }
2770 psz[-1] = '\0';
2771}
2772
2773
2774/**
2775 * Formats a full register dump.
2776 *
2777 * @param pVM Pointer to the VM.
2778 * @param pCtx The context to format.
2779 * @param pCtxCore The context core to format.
2780 * @param pHlp Output functions.
2781 * @param enmType The dump type.
2782 * @param pszPrefix Register name prefix.
2783 */
2784static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCCPUMCTXCORE pCtxCore, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType,
2785 const char *pszPrefix)
2786{
2787 NOREF(pVM);
2788
2789 /*
2790 * Format the EFLAGS.
2791 */
2792 uint32_t efl = pCtxCore->eflags.u32;
2793 char szEFlags[80];
2794 cpumR3InfoFormatFlags(&szEFlags[0], efl);
2795
2796 /*
2797 * Format the registers.
2798 */
2799 switch (enmType)
2800 {
2801 case CPUMDUMPTYPE_TERSE:
2802 if (CPUMIsGuestIn64BitCodeEx(pCtx))
2803 pHlp->pfnPrintf(pHlp,
2804 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
2805 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
2806 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
2807 "%sr14=%016RX64 %sr15=%016RX64\n"
2808 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
2809 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
2810 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
2811 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
2812 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
2813 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2814 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
2815 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, efl);
2816 else
2817 pHlp->pfnPrintf(pHlp,
2818 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
2819 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
2820 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
2821 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
2822 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2823 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
2824 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, efl);
2825 break;
2826
2827 case CPUMDUMPTYPE_DEFAULT:
2828 if (CPUMIsGuestIn64BitCodeEx(pCtx))
2829 pHlp->pfnPrintf(pHlp,
2830 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
2831 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
2832 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
2833 "%sr14=%016RX64 %sr15=%016RX64\n"
2834 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
2835 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
2836 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%016RX64:%04x %sldtr=%04x\n"
2837 ,
2838 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
2839 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
2840 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
2841 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2842 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
2843 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
2844 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
2845 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
2846 else
2847 pHlp->pfnPrintf(pHlp,
2848 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
2849 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
2850 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
2851 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%08RX64:%04x %sldtr=%04x\n"
2852 ,
2853 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
2854 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2855 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
2856 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
2857 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
2858 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
2859 break;
2860
2861 case CPUMDUMPTYPE_VERBOSE:
2862 if (CPUMIsGuestIn64BitCodeEx(pCtx))
2863 pHlp->pfnPrintf(pHlp,
2864 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
2865 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
2866 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
2867 "%sr14=%016RX64 %sr15=%016RX64\n"
2868 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
2869 "%scs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2870 "%sds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2871 "%ses={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2872 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2873 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2874 "%sss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
2875 "%scr0=%016RX64 %scr2=%016RX64 %scr3=%016RX64 %scr4=%016RX64\n"
2876 "%sdr0=%016RX64 %sdr1=%016RX64 %sdr2=%016RX64 %sdr3=%016RX64\n"
2877 "%sdr4=%016RX64 %sdr5=%016RX64 %sdr6=%016RX64 %sdr7=%016RX64\n"
2878 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
2879 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
2880 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
2881 "%sSysEnter={cs=%04llx eip=%016RX64 esp=%016RX64}\n"
2882 ,
2883 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
2884 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
2885 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
2886 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2887 pszPrefix, pCtxCore->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u,
2888 pszPrefix, pCtxCore->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u,
2889 pszPrefix, pCtxCore->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u,
2890 pszPrefix, pCtxCore->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u,
2891 pszPrefix, pCtxCore->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u,
2892 pszPrefix, pCtxCore->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u,
2893 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
2894 pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1], pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
2895 pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5], pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
2896 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
2897 pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
2898 pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2899 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
2900 else
2901 pHlp->pfnPrintf(pHlp,
2902 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
2903 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
2904 "%scs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr0=%08RX64 %sdr1=%08RX64\n"
2905 "%sds={%04x base=%016RX64 limit=%08x flags=%08x} %sdr2=%08RX64 %sdr3=%08RX64\n"
2906 "%ses={%04x base=%016RX64 limit=%08x flags=%08x} %sdr4=%08RX64 %sdr5=%08RX64\n"
2907 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr6=%08RX64 %sdr7=%08RX64\n"
2908 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x} %scr0=%08RX64 %scr2=%08RX64\n"
2909 "%sss={%04x base=%016RX64 limit=%08x flags=%08x} %scr3=%08RX64 %scr4=%08RX64\n"
2910 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
2911 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
2912 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
2913 "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
2914 ,
2915 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
2916 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
2917 pszPrefix, pCtxCore->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u, pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1],
2918 pszPrefix, pCtxCore->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u, pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
2919 pszPrefix, pCtxCore->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u, pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5],
2920 pszPrefix, pCtxCore->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u, pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
2921 pszPrefix, pCtxCore->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u, pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2,
2922 pszPrefix, pCtxCore->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
2923 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
2924 pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
2925 pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2926 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
2927
2928 pHlp->pfnPrintf(pHlp,
2929 "%sFCW=%04x %sFSW=%04x %sFTW=%04x %sFOP=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n"
2930 "%sFPUIP=%08x %sCS=%04x %sRsrvd1=%04x %sFPUDP=%08x %sDS=%04x %sRsvrd2=%04x\n"
2931 ,
2932 pszPrefix, pCtx->fpu.FCW, pszPrefix, pCtx->fpu.FSW, pszPrefix, pCtx->fpu.FTW, pszPrefix, pCtx->fpu.FOP,
2933 pszPrefix, pCtx->fpu.MXCSR, pszPrefix, pCtx->fpu.MXCSR_MASK,
2934 pszPrefix, pCtx->fpu.FPUIP, pszPrefix, pCtx->fpu.CS, pszPrefix, pCtx->fpu.Rsrvd1,
2935 pszPrefix, pCtx->fpu.FPUDP, pszPrefix, pCtx->fpu.DS, pszPrefix, pCtx->fpu.Rsrvd2
2936 );
2937 unsigned iShift = (pCtx->fpu.FSW >> 11) & 7;
2938 for (unsigned iST = 0; iST < RT_ELEMENTS(pCtx->fpu.aRegs); iST++)
2939 {
2940 unsigned iFPR = (iST + iShift) % RT_ELEMENTS(pCtx->fpu.aRegs);
2941 unsigned uTag = pCtx->fpu.FTW & (1 << iFPR) ? 1 : 0;
2942 char chSign = pCtx->fpu.aRegs[iFPR].au16[4] & 0x8000 ? '-' : '+';
2943 unsigned iInteger = (unsigned)(pCtx->fpu.aRegs[iFPR].au64[0] >> 63);
2944 uint64_t u64Fraction = pCtx->fpu.aRegs[iFPR].au64[0] & UINT64_C(0x7fffffffffffffff);
2945 unsigned uExponent = pCtx->fpu.aRegs[iFPR].au16[4] & 0x7fff;
2946 /** @todo This isn't entirely correct and needs more work! */
2947 pHlp->pfnPrintf(pHlp,
2948 "%sST(%u)=%sFPR%u={%04RX16'%08RX32'%08RX32} t%d %c%u.%022llu ^ %u",
2949 pszPrefix, iST, pszPrefix, iFPR,
2950 pCtx->fpu.aRegs[iFPR].au16[4], pCtx->fpu.aRegs[iFPR].au32[1], pCtx->fpu.aRegs[iFPR].au32[0],
2951 uTag, chSign, iInteger, u64Fraction, uExponent);
2952 if (pCtx->fpu.aRegs[iFPR].au16[5] || pCtx->fpu.aRegs[iFPR].au16[6] || pCtx->fpu.aRegs[iFPR].au16[7])
2953 pHlp->pfnPrintf(pHlp, " res={%04RX16,%04RX16,%04RX16}\n",
2954 pCtx->fpu.aRegs[iFPR].au16[5], pCtx->fpu.aRegs[iFPR].au16[6], pCtx->fpu.aRegs[iFPR].au16[7]);
2955 else
2956 pHlp->pfnPrintf(pHlp, "\n");
2957 }
2958 for (unsigned iXMM = 0; iXMM < RT_ELEMENTS(pCtx->fpu.aXMM); iXMM++)
2959 pHlp->pfnPrintf(pHlp,
2960 iXMM & 1
2961 ? "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32\n"
2962 : "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32 ",
2963 pszPrefix, iXMM, iXMM < 10 ? " " : "",
2964 pCtx->fpu.aXMM[iXMM].au32[3],
2965 pCtx->fpu.aXMM[iXMM].au32[2],
2966 pCtx->fpu.aXMM[iXMM].au32[1],
2967 pCtx->fpu.aXMM[iXMM].au32[0]);
2968 for (unsigned i = 0; i < RT_ELEMENTS(pCtx->fpu.au32RsrvdRest); i++)
2969 if (pCtx->fpu.au32RsrvdRest[i])
2970 pHlp->pfnPrintf(pHlp, "%sRsrvdRest[i]=%RX32 (offset=%#x)\n",
2971 pszPrefix, i, pCtx->fpu.au32RsrvdRest[i], RT_OFFSETOF(X86FXSTATE, au32RsrvdRest[i]) );
2972
2973 pHlp->pfnPrintf(pHlp,
2974 "%sEFER =%016RX64\n"
2975 "%sPAT =%016RX64\n"
2976 "%sSTAR =%016RX64\n"
2977 "%sCSTAR =%016RX64\n"
2978 "%sLSTAR =%016RX64\n"
2979 "%sSFMASK =%016RX64\n"
2980 "%sKERNELGSBASE =%016RX64\n",
2981 pszPrefix, pCtx->msrEFER,
2982 pszPrefix, pCtx->msrPAT,
2983 pszPrefix, pCtx->msrSTAR,
2984 pszPrefix, pCtx->msrCSTAR,
2985 pszPrefix, pCtx->msrLSTAR,
2986 pszPrefix, pCtx->msrSFMASK,
2987 pszPrefix, pCtx->msrKERNELGSBASE);
2988 break;
2989 }
2990}
2991
2992
2993/**
2994 * Display all CPU states and any other CPUM info.
2995 *
2996 * @param pVM Pointer to the VM.
2997 * @param pHlp The info helper functions.
2998 * @param pszArgs Arguments, passed on to the individual info handlers.
2999 */
3000static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3001{
3002 cpumR3InfoGuest(pVM, pHlp, pszArgs);
3003 cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);
3004 cpumR3InfoHyper(pVM, pHlp, pszArgs);
3005 cpumR3InfoHost(pVM, pHlp, pszArgs);
3006}
3007
3008
3009/**
3010 * Parses the info argument.
3011 *
3012 * The argument starts with 'verbose', 'terse' or 'default' and then
3013 * continues with the comment string.
3014 *
3015 * @param pszArgs The pointer to the argument string.
3016 * @param penmType Where to store the dump type request.
3017 * @param ppszComment Where to store the pointer to the comment string.
3018 */
3019static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
3020{
3021 if (!pszArgs)
3022 {
3023 *penmType = CPUMDUMPTYPE_DEFAULT;
3024 *ppszComment = "";
3025 }
3026 else
3027 {
3028 if (!strncmp(pszArgs, "verbose", sizeof("verbose") - 1))
3029 {
3030 pszArgs += 7;
3031 *penmType = CPUMDUMPTYPE_VERBOSE;
3032 }
3033 else if (!strncmp(pszArgs, "terse", sizeof("terse") - 1))
3034 {
3035 pszArgs += 5;
3036 *penmType = CPUMDUMPTYPE_TERSE;
3037 }
3038 else if (!strncmp(pszArgs, "default", sizeof("default") - 1))
3039 {
3040 pszArgs += 7;
3041 *penmType = CPUMDUMPTYPE_DEFAULT;
3042 }
3043 else
3044 *penmType = CPUMDUMPTYPE_DEFAULT;
3045 *ppszComment = RTStrStripL(pszArgs);
3046 }
3047}
3048
3049
3050/**
3051 * Display the guest cpu state.
3052 *
3053 * @param pVM Pointer to the VM.
3054 * @param pHlp The info helper functions.
3055 * @param pszArgs Arguments; parsed by cpumR3InfoParseArg.
3056 */
3057static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3058{
3059 CPUMDUMPTYPE enmType;
3060 const char *pszComment;
3061 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
3062
3063 /** @todo SMP support! */
3064 PVMCPU pVCpu = VMMGetCpu(pVM);
3065 if (!pVCpu)
3066 pVCpu = &pVM->aCpus[0];
3067
3068 pHlp->pfnPrintf(pHlp, "Guest CPUM (VCPU %d) state: %s\n", pVCpu->idCpu, pszComment);
3069
3070 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3071 cpumR3InfoOne(pVM, pCtx, CPUMCTX2CORE(pCtx), pHlp, enmType, "");
3072}
3073
3074
3075/**
3076 * Display the current guest instruction.
3077 *
3078 * @param pVM Pointer to the VM.
3079 * @param pHlp The info helper functions.
3080 * @param pszArgs Arguments, ignored.
3081 */
3082static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3083{
3084 NOREF(pszArgs);
3085
3086 /** @todo SMP support! */
3087 PVMCPU pVCpu = VMMGetCpu(pVM);
3088 if (!pVCpu)
3089 pVCpu = &pVM->aCpus[0];
3090
3091 char szInstruction[256];
3092 int rc = DBGFR3DisasInstrCurrent(pVCpu, szInstruction, sizeof(szInstruction));
3093 if (RT_SUCCESS(rc))
3094 pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
3095}
3096
3097
3098/**
3099 * Display the hypervisor cpu state.
3100 *
3101 * @param pVM Pointer to the VM.
3102 * @param pHlp The info helper functions.
3103 * @param pszArgs Arguments; parsed by cpumR3InfoParseArg.
3104 */
3105static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3106{
3107 CPUMDUMPTYPE enmType;
3108 const char *pszComment;
3109 /** @todo SMP */
3110 PVMCPU pVCpu = &pVM->aCpus[0];
3111
3112 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
3113 pHlp->pfnPrintf(pHlp, "Hypervisor CPUM state: %s\n", pszComment);
3114 cpumR3InfoOne(pVM, &pVCpu->cpum.s.Hyper, CPUMCTX2CORE(&pVCpu->cpum.s.Hyper), pHlp, enmType, ".");
3115 pHlp->pfnPrintf(pHlp, "CR4OrMask=%#x CR4AndMask=%#x\n", pVM->cpum.s.CR4.OrMask, pVM->cpum.s.CR4.AndMask);
3116}
3117
3118
3119/**
3120 * Display the host cpu state.
3121 *
3122 * @param pVM Pointer to the VM.
3123 * @param pHlp The info helper functions.
3124 * @param pszArgs Arguments; parsed by cpumR3InfoParseArg.
3125 */
3126static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3127{
3128 CPUMDUMPTYPE enmType;
3129 const char *pszComment;
3130 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
3131 pHlp->pfnPrintf(pHlp, "Host CPUM state: %s\n", pszComment);
3132
3133 /*
3134 * Format the EFLAGS.
3135 */
3136 /** @todo SMP */
3137 PCPUMHOSTCTX pCtx = &pVM->aCpus[0].cpum.s.Host;
3138#if HC_ARCH_BITS == 32
3139 uint32_t efl = pCtx->eflags.u32;
3140#else
3141 uint64_t efl = pCtx->rflags;
3142#endif
3143 char szEFlags[80];
3144 cpumR3InfoFormatFlags(&szEFlags[0], efl);
3145
3146 /*
3147 * Format the registers.
3148 */
3149#if HC_ARCH_BITS == 32
3150# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3151 if (!(pCtx->efer & MSR_K6_EFER_LMA))
3152# endif
3153 {
3154 pHlp->pfnPrintf(pHlp,
3155 "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
3156 "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
3157 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
3158 "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
3159 "dr[0]=%08RX64 dr[1]=%08RX64x dr[2]=%08RX64 dr[3]=%08RX64x dr[6]=%08RX64 dr[7]=%08RX64\n"
3160 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
3161 ,
3162 /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
3163 /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
3164 pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
3165 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
3166 pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
3167 (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->ldtr,
3168 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
3169 }
3170# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3171 else
3172# endif
3173#endif
3174#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3175 {
3176 pHlp->pfnPrintf(pHlp,
3177 "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
3178 "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
3179 "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
3180 " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
3181 "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
3182 "r14=%016RX64 r15=%016RX64\n"
3183 "iopl=%d %31s\n"
3184 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
3185 "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
3186 "cr4=%016RX64 ldtr=%04x tr=%04x\n"
3187 "dr[0]=%016RX64 dr[1]=%016RX64 dr[2]=%016RX64\n"
3188 "dr[3]=%016RX64 dr[6]=%016RX64 dr[7]=%016RX64\n"
3189 "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
3190 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
3191 "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
3192 ,
3193 /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
3194 pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
3195 /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
3196 /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
3197 pCtx->r11, pCtx->r12, pCtx->r13,
3198 pCtx->r14, pCtx->r15,
3199 X86_EFL_GET_IOPL(efl), szEFlags,
3200 pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
3201 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
3202 pCtx->cr4, pCtx->ldtr, pCtx->tr,
3203 pCtx->dr0, pCtx->dr1, pCtx->dr2,
3204 pCtx->dr3, pCtx->dr6, pCtx->dr7,
3205 pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
3206 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
3207 pCtx->FSbase, pCtx->GSbase, pCtx->efer);
3208 }
3209#endif
3210}
3211
3212
3213/**
3214 * Get L1 cache / TLB associativity.
3215 */
3216static const char *getCacheAss(unsigned u, char *pszBuf)
3217{
3218 if (u == 0)
3219 return "res0 ";
3220 if (u == 1)
3221 return "direct";
3222 if (u == 255)
3223 return "fully";
3224 if (u >= 256)
3225 return "???";
3226
3227 RTStrPrintf(pszBuf, 16, "%d way", u);
3228 return pszBuf;
3229}
3230
3231
3232/**
3233 * Get L2 cache associativity.
3234 */
3235 static const char *getL2CacheAss(unsigned u)
3236{
3237 switch (u)
3238 {
3239 case 0: return "off ";
3240 case 1: return "direct";
3241 case 2: return "2 way ";
3242 case 3: return "res3 ";
3243 case 4: return "4 way ";
3244 case 5: return "res5 ";
3245 case 6: return "8 way ";
3246 case 7: return "res7 ";
3247 case 8: return "16 way";
3248 case 9: return "res9 ";
3249 case 10: return "res10 ";
3250 case 11: return "res11 ";
3251 case 12: return "res12 ";
3252 case 13: return "res13 ";
3253 case 14: return "res14 ";
3254 case 15: return "fully ";
3255 default: return "????";
3256 }
3257}
3258
3259
3260/**
3261 * Display the guest CpuId leaves.
3262 *
3263 * @param pVM Pointer to the VM.
3264 * @param pHlp The info helper functions.
3265 * @param pszArgs "terse", "default" or "verbose".
3266 */
3267static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3268{
3269 /*
3270 * Parse the argument.
3271 */
3272 unsigned iVerbosity = 1;
3273 if (pszArgs)
3274 {
3275 pszArgs = RTStrStripL(pszArgs);
3276 if (!strcmp(pszArgs, "terse"))
3277 iVerbosity--;
3278 else if (!strcmp(pszArgs, "verbose"))
3279 iVerbosity++;
3280 }
3281
3282 /*
3283 * Start cracking.
3284 */
3285 CPUMCPUID Host;
3286 CPUMCPUID Guest;
3287 unsigned cStdMax = pVM->cpum.s.aGuestCpuIdStd[0].eax;
3288
3289 uint32_t cStdHstMax;
3290 uint32_t dummy;
3291 ASMCpuId_Idx_ECX(0, 0, &cStdHstMax, &dummy, &dummy, &dummy);
3292
3293 unsigned cStdLstMax = RT_MAX(RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd), cStdHstMax);
3294
3295 pHlp->pfnPrintf(pHlp,
3296 " RAW Standard CPUIDs\n"
3297 " Function eax ebx ecx edx\n");
3298 for (unsigned i = 0; i <= cStdLstMax ; i++)
3299 {
3300 if (i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
3301 {
3302 Guest = pVM->cpum.s.aGuestCpuIdStd[i];
3303 ASMCpuId_Idx_ECX(i, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3304
3305 pHlp->pfnPrintf(pHlp,
3306 "Gst: %08x %08x %08x %08x %08x%s\n"
3307 "Hst: %08x %08x %08x %08x\n",
3308 i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
3309 i <= cStdMax ? "" : "*",
3310 Host.eax, Host.ebx, Host.ecx, Host.edx);
3311 }
3312 else
3313 {
3314 ASMCpuId_Idx_ECX(i, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3315
3316 pHlp->pfnPrintf(pHlp,
3317 "Hst: %08x %08x %08x %08x %08x\n",
3318 i, Host.eax, Host.ebx, Host.ecx, Host.edx);
3319 }
3320 }
3321
3322 /*
3323 * If verbose, decode it.
3324 */
3325 if (iVerbosity)
3326 {
3327 Guest = pVM->cpum.s.aGuestCpuIdStd[0];
3328 pHlp->pfnPrintf(pHlp,
3329 "Name: %.04s%.04s%.04s\n"
3330 "Supports: 0-%x\n",
3331 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
3332 }
3333
3334 /*
3335 * Get Features.
3336 */
3337 bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdStd[0].ebx,
3338 pVM->cpum.s.aGuestCpuIdStd[0].ecx,
3339 pVM->cpum.s.aGuestCpuIdStd[0].edx);
3340 if (cStdMax >= 1 && iVerbosity)
3341 {
3342 static const char * const s_apszTypes[4] = { "primary", "overdrive", "MP", "reserved" };
3343
3344 Guest = pVM->cpum.s.aGuestCpuIdStd[1];
3345 uint32_t uEAX = Guest.eax;
3346
3347 pHlp->pfnPrintf(pHlp,
3348 "Family: %d \tExtended: %d \tEffective: %d\n"
3349 "Model: %d \tExtended: %d \tEffective: %d\n"
3350 "Stepping: %d\n"
3351 "Type: %d (%s)\n"
3352 "APIC ID: %#04x\n"
3353 "Logical CPUs: %d\n"
3354 "CLFLUSH Size: %d\n"
3355 "Brand ID: %#04x\n",
3356 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
3357 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
3358 ASMGetCpuStepping(uEAX),
3359 (uEAX >> 12) & 3, s_apszTypes[(uEAX >> 12) & 3],
3360 (Guest.ebx >> 24) & 0xff,
3361 (Guest.ebx >> 16) & 0xff,
3362 (Guest.ebx >> 8) & 0xff,
3363 (Guest.ebx >> 0) & 0xff);
3364 if (iVerbosity == 1)
3365 {
3366 uint32_t uEDX = Guest.edx;
3367 pHlp->pfnPrintf(pHlp, "Features EDX: ");
3368 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
3369 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
3370 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
3371 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
3372 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
3373 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
3374 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
3375 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
3376 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
3377 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
3378 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
3379 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SEP");
3380 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
3381 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
3382 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
3383 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
3384 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
3385 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
3386 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " PSN");
3387 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " CLFSH");
3388 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " 20");
3389 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " DS");
3390 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ACPI");
3391 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
3392 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
3393 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " SSE");
3394 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " SSE2");
3395 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " SS");
3396 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " HTT");
3397 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " TM");
3398 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " 30");
3399 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " PBE");
3400 pHlp->pfnPrintf(pHlp, "\n");
3401
3402 uint32_t uECX = Guest.ecx;
3403 pHlp->pfnPrintf(pHlp, "Features ECX: ");
3404 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " SSE3");
3405 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " PCLMUL");
3406 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DTES64");
3407 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " MONITOR");
3408 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " DS-CPL");
3409 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " VMX");
3410 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " SMX");
3411 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " EST");
3412 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " TM2");
3413 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " SSSE3");
3414 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " CNXT-ID");
3415 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " 11");
3416 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " FMA");
3417 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " CX16");
3418 if (uECX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " TPRUPDATE");
3419 if (uECX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " PDCM");
3420 if (uECX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " 16");
3421 if (uECX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PCID");
3422 if (uECX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " DCA");
3423 if (uECX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " SSE4.1");
3424 if (uECX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " SSE4.2");
3425 if (uECX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " X2APIC");
3426 if (uECX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " MOVBE");
3427 if (uECX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " POPCNT");
3428 if (uECX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " TSCDEADL");
3429 if (uECX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " AES");
3430 if (uECX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " XSAVE");
3431 if (uECX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " OSXSAVE");
3432 if (uECX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " AVX");
3433 if (uECX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " 29");
3434 if (uECX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " 30");
3435 if (uECX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " 31");
3436 pHlp->pfnPrintf(pHlp, "\n");
3437 }
3438 else
3439 {
3440 ASMCpuId(1, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3441
3442 X86CPUIDFEATEDX EdxHost = *(PX86CPUIDFEATEDX)&Host.edx;
3443 X86CPUIDFEATECX EcxHost = *(PX86CPUIDFEATECX)&Host.ecx;
3444 X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.edx;
3445 X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.ecx;
3446
3447 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
3448 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", EdxGuest.u1FPU, EdxHost.u1FPU);
3449 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", EdxGuest.u1VME, EdxHost.u1VME);
3450 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", EdxGuest.u1DE, EdxHost.u1DE);
3451 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", EdxGuest.u1PSE, EdxHost.u1PSE);
3452 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", EdxGuest.u1TSC, EdxHost.u1TSC);
3453 pHlp->pfnPrintf(pHlp, "MSR - Model Specific Registers = %d (%d)\n", EdxGuest.u1MSR, EdxHost.u1MSR);
3454 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", EdxGuest.u1PAE, EdxHost.u1PAE);
3455 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", EdxGuest.u1MCE, EdxHost.u1MCE);
3456 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", EdxGuest.u1CX8, EdxHost.u1CX8);
3457 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", EdxGuest.u1APIC, EdxHost.u1APIC);
3458 pHlp->pfnPrintf(pHlp, "10 - Reserved = %d (%d)\n", EdxGuest.u1Reserved1, EdxHost.u1Reserved1);
3459 pHlp->pfnPrintf(pHlp, "SEP - SYSENTER and SYSEXIT = %d (%d)\n", EdxGuest.u1SEP, EdxHost.u1SEP);
3460 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", EdxGuest.u1MTRR, EdxHost.u1MTRR);
3461 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", EdxGuest.u1PGE, EdxHost.u1PGE);
3462 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", EdxGuest.u1MCA, EdxHost.u1MCA);
3463 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", EdxGuest.u1CMOV, EdxHost.u1CMOV);
3464 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", EdxGuest.u1PAT, EdxHost.u1PAT);
3465 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extention = %d (%d)\n", EdxGuest.u1PSE36, EdxHost.u1PSE36);
3466 pHlp->pfnPrintf(pHlp, "PSN - Processor Serial Number = %d (%d)\n", EdxGuest.u1PSN, EdxHost.u1PSN);
3467 pHlp->pfnPrintf(pHlp, "CLFSH - CLFLUSH Instruction. = %d (%d)\n", EdxGuest.u1CLFSH, EdxHost.u1CLFSH);
3468 pHlp->pfnPrintf(pHlp, "20 - Reserved = %d (%d)\n", EdxGuest.u1Reserved2, EdxHost.u1Reserved2);
3469 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", EdxGuest.u1DS, EdxHost.u1DS);
3470 pHlp->pfnPrintf(pHlp, "ACPI - Thermal Mon. & Soft. Clock Ctrl.= %d (%d)\n", EdxGuest.u1ACPI, EdxHost.u1ACPI);
3471 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", EdxGuest.u1MMX, EdxHost.u1MMX);
3472 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", EdxGuest.u1FXSR, EdxHost.u1FXSR);
3473 pHlp->pfnPrintf(pHlp, "SSE - SSE Support = %d (%d)\n", EdxGuest.u1SSE, EdxHost.u1SSE);
3474 pHlp->pfnPrintf(pHlp, "SSE2 - SSE2 Support = %d (%d)\n", EdxGuest.u1SSE2, EdxHost.u1SSE2);
3475 pHlp->pfnPrintf(pHlp, "SS - Self Snoop = %d (%d)\n", EdxGuest.u1SS, EdxHost.u1SS);
3476 pHlp->pfnPrintf(pHlp, "HTT - Hyper-Threading Technology = %d (%d)\n", EdxGuest.u1HTT, EdxHost.u1HTT);
3477 pHlp->pfnPrintf(pHlp, "TM - Thermal Monitor = %d (%d)\n", EdxGuest.u1TM, EdxHost.u1TM);
3478 pHlp->pfnPrintf(pHlp, "30 - Reserved = %d (%d)\n", EdxGuest.u1Reserved3, EdxHost.u1Reserved3);
3479 pHlp->pfnPrintf(pHlp, "PBE - Pending Break Enable = %d (%d)\n", EdxGuest.u1PBE, EdxHost.u1PBE);
3480
3481 pHlp->pfnPrintf(pHlp, "Supports SSE3 = %d (%d)\n", EcxGuest.u1SSE3, EcxHost.u1SSE3);
3482 pHlp->pfnPrintf(pHlp, "PCLMULQDQ = %d (%d)\n", EcxGuest.u1PCLMULQDQ, EcxHost.u1PCLMULQDQ);
3483 pHlp->pfnPrintf(pHlp, "DS Area 64-bit layout = %d (%d)\n", EcxGuest.u1DTE64, EcxHost.u1DTE64);
3484 pHlp->pfnPrintf(pHlp, "Supports MONITOR/MWAIT = %d (%d)\n", EcxGuest.u1Monitor, EcxHost.u1Monitor);
3485 pHlp->pfnPrintf(pHlp, "CPL-DS - CPL Qualified Debug Store = %d (%d)\n", EcxGuest.u1CPLDS, EcxHost.u1CPLDS);
3486 pHlp->pfnPrintf(pHlp, "VMX - Virtual Machine Technology = %d (%d)\n", EcxGuest.u1VMX, EcxHost.u1VMX);
3487 pHlp->pfnPrintf(pHlp, "SMX - Safer Mode Extensions = %d (%d)\n", EcxGuest.u1SMX, EcxHost.u1SMX);
3488 pHlp->pfnPrintf(pHlp, "Enhanced SpeedStep Technology = %d (%d)\n", EcxGuest.u1EST, EcxHost.u1EST);
3489 pHlp->pfnPrintf(pHlp, "Terminal Monitor 2 = %d (%d)\n", EcxGuest.u1TM2, EcxHost.u1TM2);
3490 pHlp->pfnPrintf(pHlp, "Supplemental SSE3 instructions = %d (%d)\n", EcxGuest.u1SSSE3, EcxHost.u1SSSE3);
3491 pHlp->pfnPrintf(pHlp, "L1 Context ID = %d (%d)\n", EcxGuest.u1CNTXID, EcxHost.u1CNTXID);
3492 pHlp->pfnPrintf(pHlp, "11 - Reserved = %d (%d)\n", EcxGuest.u1Reserved1, EcxHost.u1Reserved1);
3493 pHlp->pfnPrintf(pHlp, "FMA extensions using YMM state = %d (%d)\n", EcxGuest.u1FMA, EcxHost.u1FMA);
3494 pHlp->pfnPrintf(pHlp, "CMPXCHG16B instruction = %d (%d)\n", EcxGuest.u1CX16, EcxHost.u1CX16);
3495 pHlp->pfnPrintf(pHlp, "xTPR Update Control = %d (%d)\n", EcxGuest.u1TPRUpdate, EcxHost.u1TPRUpdate);
3496 pHlp->pfnPrintf(pHlp, "Perf/Debug Capability MSR = %d (%d)\n", EcxGuest.u1PDCM, EcxHost.u1PDCM);
3497 pHlp->pfnPrintf(pHlp, "16 - Reserved = %d (%d)\n", EcxGuest.u1Reserved2, EcxHost.u1Reserved2);
3498 pHlp->pfnPrintf(pHlp, "PCID - Process-context identifiers = %d (%d)\n", EcxGuest.u1PCID, EcxHost.u1PCID);
3499 pHlp->pfnPrintf(pHlp, "DCA - Direct Cache Access = %d (%d)\n", EcxGuest.u1DCA, EcxHost.u1DCA);
3500 pHlp->pfnPrintf(pHlp, "SSE4.1 instruction extensions = %d (%d)\n", EcxGuest.u1SSE4_1, EcxHost.u1SSE4_1);
3501 pHlp->pfnPrintf(pHlp, "SSE4.2 instruction extensions = %d (%d)\n", EcxGuest.u1SSE4_2, EcxHost.u1SSE4_2);
3502 pHlp->pfnPrintf(pHlp, "Supports the x2APIC extensions = %d (%d)\n", EcxGuest.u1x2APIC, EcxHost.u1x2APIC);
3503 pHlp->pfnPrintf(pHlp, "MOVBE instruction = %d (%d)\n", EcxGuest.u1MOVBE, EcxHost.u1MOVBE);
3504 pHlp->pfnPrintf(pHlp, "POPCNT instruction = %d (%d)\n", EcxGuest.u1POPCNT, EcxHost.u1POPCNT);
3505 pHlp->pfnPrintf(pHlp, "TSC-Deadline LAPIC timer mode = %d (%d)\n", EcxGuest.u1TSCDEADLINE,EcxHost.u1TSCDEADLINE);
3506 pHlp->pfnPrintf(pHlp, "AESNI instruction extensions = %d (%d)\n", EcxGuest.u1AES, EcxHost.u1AES);
3507 pHlp->pfnPrintf(pHlp, "XSAVE/XRSTOR extended state feature = %d (%d)\n", EcxGuest.u1XSAVE, EcxHost.u1XSAVE);
3508 pHlp->pfnPrintf(pHlp, "Supports OSXSAVE = %d (%d)\n", EcxGuest.u1OSXSAVE, EcxHost.u1OSXSAVE);
3509 pHlp->pfnPrintf(pHlp, "AVX instruction extensions = %d (%d)\n", EcxGuest.u1AVX, EcxHost.u1AVX);
3510 pHlp->pfnPrintf(pHlp, "29/30 - Reserved = %#x (%#x)\n",EcxGuest.u2Reserved3, EcxHost.u2Reserved3);
3511 pHlp->pfnPrintf(pHlp, "Hypervisor Present (we're a guest) = %d (%d)\n", EcxGuest.u1HVP, EcxHost.u1HVP);
3512 }
3513 }
3514 if (cStdMax >= 2 && iVerbosity)
3515 {
3516 /** @todo */
3517 }
3518
3519 /*
3520 * Extended.
3521 * Implemented after AMD specs.
3522 */
3523 unsigned cExtMax = pVM->cpum.s.aGuestCpuIdExt[0].eax & 0xffff;
3524
3525 pHlp->pfnPrintf(pHlp,
3526 "\n"
3527 " RAW Extended CPUIDs\n"
3528 " Function eax ebx ecx edx\n");
3529 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt); i++)
3530 {
3531 Guest = pVM->cpum.s.aGuestCpuIdExt[i];
3532 ASMCpuId(0x80000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3533
3534 pHlp->pfnPrintf(pHlp,
3535 "Gst: %08x %08x %08x %08x %08x%s\n"
3536 "Hst: %08x %08x %08x %08x\n",
3537 0x80000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
3538 i <= cExtMax ? "" : "*",
3539 Host.eax, Host.ebx, Host.ecx, Host.edx);
3540 }
3541
3542 /*
3543 * Understandable output
3544 */
3545 if (iVerbosity)
3546 {
3547 Guest = pVM->cpum.s.aGuestCpuIdExt[0];
3548 pHlp->pfnPrintf(pHlp,
3549 "Ext Name: %.4s%.4s%.4s\n"
3550 "Ext Supports: 0x80000000-%#010x\n",
3551 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
3552 }
3553
3554 if (iVerbosity && cExtMax >= 1)
3555 {
3556 Guest = pVM->cpum.s.aGuestCpuIdExt[1];
3557 uint32_t uEAX = Guest.eax;
3558 pHlp->pfnPrintf(pHlp,
3559 "Family: %d \tExtended: %d \tEffective: %d\n"
3560 "Model: %d \tExtended: %d \tEffective: %d\n"
3561 "Stepping: %d\n"
3562 "Brand ID: %#05x\n",
3563 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
3564 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
3565 ASMGetCpuStepping(uEAX),
3566 Guest.ebx & 0xfff);
3567
3568 if (iVerbosity == 1)
3569 {
3570 uint32_t uEDX = Guest.edx;
3571 pHlp->pfnPrintf(pHlp, "Features EDX: ");
3572 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
3573 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
3574 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
3575 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
3576 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
3577 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
3578 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
3579 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
3580 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
3581 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
3582 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
3583 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SCR");
3584 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
3585 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
3586 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
3587 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
3588 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
3589 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
3590 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " 18");
3591 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " 19");
3592 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " NX");
3593 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " 21");
3594 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ExtMMX");
3595 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
3596 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
3597 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " FastFXSR");
3598 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " Page1GB");
3599 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " RDTSCP");
3600 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " 28");
3601 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " LongMode");
3602 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " Ext3DNow");
3603 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " 3DNow");
3604 pHlp->pfnPrintf(pHlp, "\n");
3605
3606 uint32_t uECX = Guest.ecx;
3607 pHlp->pfnPrintf(pHlp, "Features ECX: ");
3608 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " LAHF/SAHF");
3609 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " CMPL");
3610 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " SVM");
3611 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " ExtAPIC");
3612 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " CR8L");
3613 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " ABM");
3614 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " SSE4A");
3615 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MISALNSSE");
3616 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " 3DNOWPRF");
3617 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " OSVW");
3618 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " IBS");
3619 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SSE5");
3620 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " SKINIT");
3621 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " WDT");
3622 for (unsigned iBit = 14; iBit < 32; iBit++)
3623 if (uECX & RT_BIT(iBit))
3624 pHlp->pfnPrintf(pHlp, " %d", iBit);
3625 pHlp->pfnPrintf(pHlp, "\n");
3626 }
3627 else
3628 {
3629 ASMCpuId(0x80000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3630
3631 uint32_t uEdxGst = Guest.edx;
3632 uint32_t uEdxHst = Host.edx;
3633 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
3634 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
3635 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
3636 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
3637 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
3638 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
3639 pHlp->pfnPrintf(pHlp, "MSR - K86 Model Specific Registers = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
3640 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
3641 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
3642 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
3643 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
3644 pHlp->pfnPrintf(pHlp, "10 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
3645 pHlp->pfnPrintf(pHlp, "SEP - SYSCALL and SYSRET = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
3646 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
3647 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
3648 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
3649 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
3650 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
3651 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extention = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
3652 pHlp->pfnPrintf(pHlp, "18 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
3653 pHlp->pfnPrintf(pHlp, "19 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
3654 pHlp->pfnPrintf(pHlp, "NX - No-Execute Page Protection = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
3655 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
3656 pHlp->pfnPrintf(pHlp, "AXMMX - AMD Extensions to MMX Instr. = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
3657 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
3658 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
3659 pHlp->pfnPrintf(pHlp, "25 - AMD fast FXSAVE and FXRSTOR Instr.= %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
3660 pHlp->pfnPrintf(pHlp, "26 - 1 GB large page support = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
3661 pHlp->pfnPrintf(pHlp, "27 - RDTSCP instruction = %d (%d)\n", !!(uEdxGst & RT_BIT(27)), !!(uEdxHst & RT_BIT(27)));
3662 pHlp->pfnPrintf(pHlp, "28 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(28)), !!(uEdxHst & RT_BIT(28)));
3663 pHlp->pfnPrintf(pHlp, "29 - AMD Long Mode = %d (%d)\n", !!(uEdxGst & RT_BIT(29)), !!(uEdxHst & RT_BIT(29)));
3664 pHlp->pfnPrintf(pHlp, "30 - AMD Extensions to 3DNow! = %d (%d)\n", !!(uEdxGst & RT_BIT(30)), !!(uEdxHst & RT_BIT(30)));
3665 pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow! = %d (%d)\n", !!(uEdxGst & RT_BIT(31)), !!(uEdxHst & RT_BIT(31)));
3666
3667 uint32_t uEcxGst = Guest.ecx;
3668 uint32_t uEcxHst = Host.ecx;
3669 pHlp->pfnPrintf(pHlp, "LahfSahf - LAHF/SAHF in 64-bit mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 0)), !!(uEcxHst & RT_BIT( 0)));
3670 pHlp->pfnPrintf(pHlp, "CmpLegacy - Core MP legacy mode (depr) = %d (%d)\n", !!(uEcxGst & RT_BIT( 1)), !!(uEcxHst & RT_BIT( 1)));
3671 pHlp->pfnPrintf(pHlp, "SVM - AMD VM Extensions = %d (%d)\n", !!(uEcxGst & RT_BIT( 2)), !!(uEcxHst & RT_BIT( 2)));
3672 pHlp->pfnPrintf(pHlp, "APIC registers starting at 0x400 = %d (%d)\n", !!(uEcxGst & RT_BIT( 3)), !!(uEcxHst & RT_BIT( 3)));
3673 pHlp->pfnPrintf(pHlp, "AltMovCR8 - LOCK MOV CR0 means MOV CR8 = %d (%d)\n", !!(uEcxGst & RT_BIT( 4)), !!(uEcxHst & RT_BIT( 4)));
3674 pHlp->pfnPrintf(pHlp, "5 - Advanced bit manipulation = %d (%d)\n", !!(uEcxGst & RT_BIT( 5)), !!(uEcxHst & RT_BIT( 5)));
3675 pHlp->pfnPrintf(pHlp, "6 - SSE4A instruction support = %d (%d)\n", !!(uEcxGst & RT_BIT( 6)), !!(uEcxHst & RT_BIT( 6)));
3676 pHlp->pfnPrintf(pHlp, "7 - Misaligned SSE mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 7)), !!(uEcxHst & RT_BIT( 7)));
3677 pHlp->pfnPrintf(pHlp, "8 - PREFETCH and PREFETCHW instruction= %d (%d)\n", !!(uEcxGst & RT_BIT( 8)), !!(uEcxHst & RT_BIT( 8)));
3678 pHlp->pfnPrintf(pHlp, "9 - OS visible workaround = %d (%d)\n", !!(uEcxGst & RT_BIT( 9)), !!(uEcxHst & RT_BIT( 9)));
3679 pHlp->pfnPrintf(pHlp, "10 - Instruction based sampling = %d (%d)\n", !!(uEcxGst & RT_BIT(10)), !!(uEcxHst & RT_BIT(10)));
3680 pHlp->pfnPrintf(pHlp, "11 - SSE5 support = %d (%d)\n", !!(uEcxGst & RT_BIT(11)), !!(uEcxHst & RT_BIT(11)));
3681 pHlp->pfnPrintf(pHlp, "12 - SKINIT, STGI, and DEV support = %d (%d)\n", !!(uEcxGst & RT_BIT(12)), !!(uEcxHst & RT_BIT(12)));
3682 pHlp->pfnPrintf(pHlp, "13 - Watchdog timer support. = %d (%d)\n", !!(uEcxGst & RT_BIT(13)), !!(uEcxHst & RT_BIT(13)));
3683 pHlp->pfnPrintf(pHlp, "31:14 - Reserved = %#x (%#x)\n", uEcxGst >> 14, uEcxHst >> 14);
3684 }
3685 }
3686
3687 if (iVerbosity && cExtMax >= 2)
3688 {
3689 char szString[4*4*3+1] = {0};
3690 uint32_t *pu32 = (uint32_t *)szString;
3691 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].eax;
3692 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ebx;
3693 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ecx;
3694 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].edx;
3695 if (cExtMax >= 3)
3696 {
3697 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].eax;
3698 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ebx;
3699 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ecx;
3700 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].edx;
3701 }
3702 if (cExtMax >= 4)
3703 {
3704 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].eax;
3705 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ebx;
3706 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ecx;
3707 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].edx;
3708 }
3709 pHlp->pfnPrintf(pHlp, "Full Name: %s\n", szString);
3710 }
3711
3712 if (iVerbosity && cExtMax >= 5)
3713 {
3714 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[5].eax;
3715 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[5].ebx;
3716 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[5].ecx;
3717 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[5].edx;
3718 char sz1[32];
3719 char sz2[32];
3720
3721 pHlp->pfnPrintf(pHlp,
3722 "TLB 2/4M Instr/Uni: %s %3d entries\n"
3723 "TLB 2/4M Data: %s %3d entries\n",
3724 getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
3725 getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
3726 pHlp->pfnPrintf(pHlp,
3727 "TLB 4K Instr/Uni: %s %3d entries\n"
3728 "TLB 4K Data: %s %3d entries\n",
3729 getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
3730 getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
3731 pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
3732 "L1 Instr Cache Lines Per Tag: %d\n"
3733 "L1 Instr Cache Associativity: %s\n"
3734 "L1 Instr Cache Size: %d KB\n",
3735 (uEDX >> 0) & 0xff,
3736 (uEDX >> 8) & 0xff,
3737 getCacheAss((uEDX >> 16) & 0xff, sz1),
3738 (uEDX >> 24) & 0xff);
3739 pHlp->pfnPrintf(pHlp,
3740 "L1 Data Cache Line Size: %d bytes\n"
3741 "L1 Data Cache Lines Per Tag: %d\n"
3742 "L1 Data Cache Associativity: %s\n"
3743 "L1 Data Cache Size: %d KB\n",
3744 (uECX >> 0) & 0xff,
3745 (uECX >> 8) & 0xff,
3746 getCacheAss((uECX >> 16) & 0xff, sz1),
3747 (uECX >> 24) & 0xff);
3748 }
3749
3750 if (iVerbosity && cExtMax >= 6)
3751 {
3752 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[6].eax;
3753 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[6].ebx;
3754 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[6].edx;
3755
3756 pHlp->pfnPrintf(pHlp,
3757 "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
3758 "L2 TLB 2/4M Data: %s %4d entries\n",
3759 getL2CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
3760 getL2CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
3761 pHlp->pfnPrintf(pHlp,
3762 "L2 TLB 4K Instr/Uni: %s %4d entries\n"
3763 "L2 TLB 4K Data: %s %4d entries\n",
3764 getL2CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
3765 getL2CacheAss((uEBX >> 28) & 0xf), (uEBX >> 16) & 0xfff);
3766 pHlp->pfnPrintf(pHlp,
3767 "L2 Cache Line Size: %d bytes\n"
3768 "L2 Cache Lines Per Tag: %d\n"
3769 "L2 Cache Associativity: %s\n"
3770 "L2 Cache Size: %d KB\n",
3771 (uEDX >> 0) & 0xff,
3772 (uEDX >> 8) & 0xf,
3773 getL2CacheAss((uEDX >> 12) & 0xf),
3774 (uEDX >> 16) & 0xffff);
3775 }
3776
3777 if (iVerbosity && cExtMax >= 7)
3778 {
3779 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[7].edx;
3780
3781 pHlp->pfnPrintf(pHlp, "APM Features: ");
3782 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " TS");
3783 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " FID");
3784 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " VID");
3785 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " TTP");
3786 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TM");
3787 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " STC");
3788 for (unsigned iBit = 6; iBit < 32; iBit++)
3789 if (uEDX & RT_BIT(iBit))
3790 pHlp->pfnPrintf(pHlp, " %d", iBit);
3791 pHlp->pfnPrintf(pHlp, "\n");
3792 }
3793
3794 if (iVerbosity && cExtMax >= 8)
3795 {
3796 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[8].eax;
3797 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[8].ecx;
3798
3799 pHlp->pfnPrintf(pHlp,
3800 "Physical Address Width: %d bits\n"
3801 "Virtual Address Width: %d bits\n"
3802 "Guest Physical Address Width: %d bits\n",
3803 (uEAX >> 0) & 0xff,
3804 (uEAX >> 8) & 0xff,
3805 (uEAX >> 16) & 0xff);
3806 pHlp->pfnPrintf(pHlp,
3807 "Physical Core Count: %d\n",
3808 (uECX >> 0) & 0xff);
3809 }
3810
3811
3812 /*
3813 * Centaur.
3814 */
3815 unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdCentaur[0].eax & 0xffff;
3816
3817 pHlp->pfnPrintf(pHlp,
3818 "\n"
3819 " RAW Centaur CPUIDs\n"
3820 " Function eax ebx ecx edx\n");
3821 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur); i++)
3822 {
3823 Guest = pVM->cpum.s.aGuestCpuIdCentaur[i];
3824 ASMCpuId(0xc0000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3825
3826 pHlp->pfnPrintf(pHlp,
3827 "Gst: %08x %08x %08x %08x %08x%s\n"
3828 "Hst: %08x %08x %08x %08x\n",
3829 0xc0000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
3830 i <= cCentaurMax ? "" : "*",
3831 Host.eax, Host.ebx, Host.ecx, Host.edx);
3832 }
3833
3834 /*
3835 * Understandable output
3836 */
3837 if (iVerbosity)
3838 {
3839 Guest = pVM->cpum.s.aGuestCpuIdCentaur[0];
3840 pHlp->pfnPrintf(pHlp,
3841 "Centaur Supports: 0xc0000000-%#010x\n",
3842 Guest.eax);
3843 }
3844
3845 if (iVerbosity && cCentaurMax >= 1)
3846 {
3847 ASMCpuId(0xc0000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
3848        uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdCentaur[1].edx;
3849 uint32_t uEdxHst = Host.edx;
3850
3851 if (iVerbosity == 1)
3852 {
3853 pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
3854 if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
3855 if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
3856 if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
3857 if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
3858 if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
3859 if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
3860 if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
3861 if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
3862 /* possibly indicating MM/HE and MM/HE-E on older chips... */
3863 if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
3864 if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
3865 if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
3866 if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
3867 if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
3868 if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
3869 for (unsigned iBit = 14; iBit < 32; iBit++)
3870 if (uEdxGst & RT_BIT(iBit))
3871 pHlp->pfnPrintf(pHlp, " %d", iBit);
3872 pHlp->pfnPrintf(pHlp, "\n");
3873 }
3874 else
3875 {
3876 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
3877 pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
3878 pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
3879 pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
3880 pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
3881 pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
3882 pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
3883 pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
3884 pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
3885 /* possibly indicating MM/HE and MM/HE-E on older chips... */
3886 pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
3887 pHlp->pfnPrintf(pHlp, "ACE2-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
3888 pHlp->pfnPrintf(pHlp, "PHE - Padlock Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
3889 pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
3890 pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
3891 pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
3892 pHlp->pfnPrintf(pHlp, "14 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
3893 pHlp->pfnPrintf(pHlp, "15 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
3894 pHlp->pfnPrintf(pHlp, "Parallax = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
3895 pHlp->pfnPrintf(pHlp, "Parallax enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
3896 pHlp->pfnPrintf(pHlp, "Overstress = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
3897 pHlp->pfnPrintf(pHlp, "Overstress enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
3898 pHlp->pfnPrintf(pHlp, "TM3 - Temperature Monitoring 3 = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
3899 pHlp->pfnPrintf(pHlp, "TM3-E - TM3 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
3900 pHlp->pfnPrintf(pHlp, "RNG2 - Random Number Generator 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
3901 pHlp->pfnPrintf(pHlp, "RNG2-E - RNG2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
3902 pHlp->pfnPrintf(pHlp, "24 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
3903 pHlp->pfnPrintf(pHlp, "PHE2 - Padlock Hash Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
3904 pHlp->pfnPrintf(pHlp, "PHE2-E - PHE2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
3905 for (unsigned iBit = 27; iBit < 32; iBit++)
3906 if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
3907 pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", iBit, !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
3908 pHlp->pfnPrintf(pHlp, "\n");
3909 }
3910 }
3911}
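
/*
 * A minimal usage sketch: the info handler above is registered with DBGF
 * under the name "cpuid", so the same dump can be produced programmatically
 * (CPUMR3LogCpuIds at the end of this file does exactly this):
 *
 * @code
 *     DBGFR3Info(pVM->pUVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
 * @endcode
 */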
3912
3913
3914/**
3915 * Structure used when disassembling instructions in DBGF.
3916 * This is used so the reader function can get at the state it needs.
3917 */
3918typedef struct CPUMDISASSTATE
3919{
3920 /** Pointer to the CPU structure. */
3921 PDISCPUSTATE pCpu;
3922 /** Pointer to the VM. */
3923 PVM pVM;
3924 /** Pointer to the VMCPU. */
3925 PVMCPU pVCpu;
3926 /** Pointer to the first byte in the segment. */
3927 RTGCUINTPTR GCPtrSegBase;
3928 /** Pointer to the byte after the end of the segment. (might have wrapped!) */
3929 RTGCUINTPTR GCPtrSegEnd;
3930 /** The size of the segment minus 1. */
3931 RTGCUINTPTR cbSegLimit;
3932 /** Pointer to the current page - R3 Ptr. */
3933 void const *pvPageR3;
3934 /** Pointer to the current page - GC Ptr. */
3935 RTGCPTR pvPageGC;
3936 /** The lock information that PGMPhysReleasePageMappingLock needs. */
3937 PGMPAGEMAPLOCK PageMapLock;
3938 /** Whether the PageMapLock is valid or not. */
3939 bool fLocked;
3940    /** 64-bit mode or not. */
3941 bool f64Bits;
3942} CPUMDISASSTATE, *PCPUMDISASSTATE;
3943
3944
3945/**
3946 * @callback_method_impl{FNDISREADBYTES}
3947 */
3948static DECLCALLBACK(int) cpumR3DisasInstrRead(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
3949{
3950 PCPUMDISASSTATE pState = (PCPUMDISASSTATE)pDis->pvUser;
3951 for (;;)
3952 {
3953 RTGCUINTPTR GCPtr = pDis->uInstrAddr + offInstr + pState->GCPtrSegBase;
3954
3955 /*
3956 * Need to update the page translation?
3957 */
3958 if ( !pState->pvPageR3
3959 || (GCPtr >> PAGE_SHIFT) != (pState->pvPageGC >> PAGE_SHIFT))
3960 {
3961 int rc = VINF_SUCCESS;
3962
3963 /* translate the address */
3964 pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
3965 if ( !HMIsEnabled(pState->pVM)
3966 && MMHyperIsInsideArea(pState->pVM, pState->pvPageGC))
3967 {
3968 pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->pvPageGC);
3969 if (!pState->pvPageR3)
3970 rc = VERR_INVALID_POINTER;
3971 }
3972 else
3973 {
3974 /* Release mapping lock previously acquired. */
3975 if (pState->fLocked)
3976 PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
3977 rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVCpu, pState->pvPageGC, &pState->pvPageR3, &pState->PageMapLock);
3978 pState->fLocked = RT_SUCCESS_NP(rc);
3979 }
3980 if (RT_FAILURE(rc))
3981 {
3982 pState->pvPageR3 = NULL;
3983 return rc;
3984 }
3985 }
3986
3987 /*
3988 * Check the segment limit.
3989 */
3990 if (!pState->f64Bits && pDis->uInstrAddr + offInstr > pState->cbSegLimit)
3991 return VERR_OUT_OF_SELECTOR_BOUNDS;
3992
3993 /*
3994 * Calc how much we can read.
3995 */
3996 uint32_t cb = PAGE_SIZE - (GCPtr & PAGE_OFFSET_MASK);
3997 if (!pState->f64Bits)
3998 {
3999 RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
4000 if (cb > cbSeg && cbSeg)
4001 cb = cbSeg;
4002 }
4003 if (cb > cbMaxRead)
4004 cb = cbMaxRead;
4005
4006 /*
4007 * Read and advance or exit.
4008 */
4009 memcpy(&pDis->abInstr[offInstr], (uint8_t *)pState->pvPageR3 + (GCPtr & PAGE_OFFSET_MASK), cb);
4010 offInstr += (uint8_t)cb;
4011 if (cb >= cbMinRead)
4012 {
4013 pDis->cbCachedInstr = offInstr;
4014 return VINF_SUCCESS;
4015 }
4016 cbMinRead -= (uint8_t)cb;
4017 cbMaxRead -= (uint8_t)cb;
4018 }
4019}
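
/*
 * A small sketch of the chunking math used by the reader above.  A single
 * iteration never copies past the end of the current guest page, so a read
 * straddling a page boundary takes two passes through the loop (the values
 * below are hypothetical):
 *
 * @code
 *     // GCPtr = 0x00401ffe, PAGE_SIZE = 0x1000:
 *     uint32_t cb = PAGE_SIZE - (GCPtr & PAGE_OFFSET_MASK); // 0x1000 - 0xffe = 2
 *     // The remaining instruction bytes are fetched on the next iteration,
 *     // after the page translation has been updated.
 * @endcode
 */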
4020
4021
4022/**
4023 * Disassemble an instruction and return the information in the provided structure.
4024 *
4025 * @returns VBox status code.
4026 * @param pVM Pointer to the VM.
4027 * @param pVCpu Pointer to the VMCPU.
4028 * @param pCtx Pointer to the guest CPU context.
4029 * @param GCPtrPC Program counter (relative to CS) to disassemble from.
4030 * @param pCpu Disassembly state.
4031 * @param pszPrefix String prefix for logging (debug only).
4032 *
4033 */
4034VMMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPTR GCPtrPC, PDISCPUSTATE pCpu, const char *pszPrefix)
4035{
4036 CPUMDISASSTATE State;
4037 int rc;
4038
4039 const PGMMODE enmMode = PGMGetGuestMode(pVCpu);
4040 State.pCpu = pCpu;
4041 State.pvPageGC = 0;
4042 State.pvPageR3 = NULL;
4043 State.pVM = pVM;
4044 State.pVCpu = pVCpu;
4045 State.fLocked = false;
4046 State.f64Bits = false;
4047
4048 /*
4049 * Get selector information.
4050 */
4051 DISCPUMODE enmDisCpuMode;
4052 if ( (pCtx->cr0 & X86_CR0_PE)
4053 && pCtx->eflags.Bits.u1VM == 0)
4054 {
4055 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
4056 {
4057# ifdef VBOX_WITH_RAW_MODE_NOT_R0
4058 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtx->cs);
4059# endif
4060 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
4061 return VERR_CPUM_HIDDEN_CS_LOAD_ERROR;
4062 }
4063 State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->cs.Attr.n.u1Long;
4064 State.GCPtrSegBase = pCtx->cs.u64Base;
4065 State.GCPtrSegEnd = pCtx->cs.u32Limit + 1 + (RTGCUINTPTR)pCtx->cs.u64Base;
4066 State.cbSegLimit = pCtx->cs.u32Limit;
4067 enmDisCpuMode = (State.f64Bits)
4068 ? DISCPUMODE_64BIT
4069 : pCtx->cs.Attr.n.u1DefBig
4070 ? DISCPUMODE_32BIT
4071 : DISCPUMODE_16BIT;
4072 }
4073 else
4074 {
4075 /* real or V86 mode */
4076 enmDisCpuMode = DISCPUMODE_16BIT;
4077 State.GCPtrSegBase = pCtx->cs.Sel * 16;
4078 State.GCPtrSegEnd = 0xFFFFFFFF;
4079 State.cbSegLimit = 0xFFFFFFFF;
4080 }
4081
4082 /*
4083 * Disassemble the instruction.
4084 */
4085 uint32_t cbInstr;
4086#ifndef LOG_ENABLED
4087 rc = DISInstrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State, pCpu, &cbInstr);
4088 if (RT_SUCCESS(rc))
4089 {
4090#else
4091 char szOutput[160];
4092 rc = DISInstrToStrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State,
4093 pCpu, &cbInstr, szOutput, sizeof(szOutput));
4094 if (RT_SUCCESS(rc))
4095 {
4096 /* log it */
4097 if (pszPrefix)
4098 Log(("%s-CPU%d: %s", pszPrefix, pVCpu->idCpu, szOutput));
4099 else
4100 Log(("%s", szOutput));
4101#endif
4102 rc = VINF_SUCCESS;
4103 }
4104 else
4105 Log(("CPUMR3DisasmInstrCPU: DISInstr failed for %04X:%RGv rc=%Rrc\n", pCtx->cs.Sel, GCPtrPC, rc));
4106
4107 /* Release mapping lock acquired in cpumR3DisasInstrRead. */
4108 if (State.fLocked)
4109 PGMPhysReleasePageMappingLock(pVM, &State.PageMapLock);
4110
4111 return rc;
4112}
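
/*
 * A hedged usage sketch (the calling context is hypothetical): disassemble
 * and log the instruction at the current guest CS:RIP.
 *
 * @code
 *     DISCPUSTATE Cpu;
 *     PCPUMCTX    pCtx = CPUMQueryGuestCtxPtr(pVCpu);
 *     int rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "EM");
 *     if (RT_FAILURE(rc))
 *         Log(("Disassembly failed: %Rrc\n", rc));
 * @endcode
 */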
4113
4114
4115
4116/**
4117 * API for controlling a few of the CPU features found in CR4.
4118 *
4119 * Currently only X86_CR4_TSD is accepted as input.
4120 *
4121 * @returns VBox status code.
4122 *
4123 * @param pVM Pointer to the VM.
4124 * @param fOr The CR4 OR mask.
4125 * @param fAnd The CR4 AND mask.
4126 */
4127VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd)
4128{
4129 AssertMsgReturn(!(fOr & ~(X86_CR4_TSD)), ("%#x\n", fOr), VERR_INVALID_PARAMETER);
4130 AssertMsgReturn((fAnd & ~(X86_CR4_TSD)) == ~(X86_CR4_TSD), ("%#x\n", fAnd), VERR_INVALID_PARAMETER);
4131
4132 pVM->cpum.s.CR4.OrMask &= fAnd;
4133 pVM->cpum.s.CR4.OrMask |= fOr;
4134
4135 return VINF_SUCCESS;
4136}
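
/*
 * A minimal usage sketch, using only the X86_CR4_TSD bit this API accepts:
 * setting CR4.TSD makes guest RDTSC trap outside ring 0.
 *
 * @code
 *     CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~(RTHCUINTREG)0);           // set TSD
 *     CPUMR3SetCR4Feature(pVM, 0,           ~(RTHCUINTREG)X86_CR4_TSD); // clear TSD
 * @endcode
 */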
4137
4138
4139/**
4140 * Gets a pointer to the array of standard CPUID leaves.
4141 *
4142 * CPUMR3GetGuestCpuIdStdMax() gives the size of the array.
4143 *
4144 * @returns Pointer to the standard CPUID leaves (read-only).
4145 * @param pVM Pointer to the VM.
4146 * @remark Intended for PATM.
4147 */
4148VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdStdRCPtr(PVM pVM)
4149{
4150    return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
4151}
4152
4153
4154/**
4155 * Gets a pointer to the array of extended CPUID leaves.
4156 *
4157 * CPUMGetGuestCpuIdExtMax() gives the size of the array.
4158 *
4159 * @returns Pointer to the extended CPUID leaves (read-only).
4160 * @param pVM Pointer to the VM.
4161 * @remark Intended for PATM.
4162 */
4163VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdExtRCPtr(PVM pVM)
4164{
4165 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
4166}
4167
4168
4169/**
4170 * Gets a pointer to the array of centaur CPUID leaves.
4171 *
4172 * CPUMGetGuestCpuIdCentaurMax() gives the size of the array.
4173 *
4174 * @returns Pointer to the centaur CPUID leaves (read-only).
4175 * @param pVM Pointer to the VM.
4176 * @remark Intended for PATM.
4177 */
4178VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdCentaurRCPtr(PVM pVM)
4179{
4180 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
4181}
4182
4183
4184/**
4185 * Gets a pointer to the default CPUID leaf.
4186 *
4187 * @returns Pointer to the default CPUID leaf (read-only).
4188 * @param pVM Pointer to the VM.
4189 * @remark Intended for PATM.
4190 */
4191VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdDefRCPtr(PVM pVM)
4192{
4193 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
4194}
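
/*
 * A hedged sketch of how raw-mode code such as PATM is expected to use the
 * getters above, pairing each RC pointer with the corresponding *Max API
 * (named in the remarks) to obtain the array bounds:
 *
 * @code
 *     RCPTRTYPE(PCCPUMCPUID) pStdRC = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
 *     uint32_t               cStd   = CPUMR3GetGuestCpuIdStdMax(pVM);
 * @endcode
 */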
4195
4196#ifdef VBOX_WITH_RAW_MODE
4197
4198/**
4199 * Transforms the guest CPU state to raw-ring mode.
4200 *
4201 * This function will change the cs and ss selector registers with RPL=0 to RPL=1.
4202 *
4203 * @returns VBox status code. (recompiler failure)
4204 * @param pVCpu Pointer to the VMCPU.
4205 * @param pCtxCore The context core (for trap usage).
4206 * @see @ref pg_raw
4207 */
4208VMMR3DECL(int) CPUMR3RawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
4209{
4210 PVM pVM = pVCpu->CTX_SUFF(pVM);
4211
4212 Assert(!pVCpu->cpum.s.fRawEntered);
4213 Assert(!pVCpu->cpum.s.fRemEntered);
4214 if (!pCtxCore)
4215 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
4216
4217 /*
4218 * Are we in Ring-0?
4219 */
4220 if ( pCtxCore->ss.Sel
4221 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
4222 && !pCtxCore->eflags.Bits.u1VM)
4223 {
4224 /*
4225 * Enter execution mode.
4226 */
4227 PATMRawEnter(pVM, pCtxCore);
4228
4229 /*
4230 * Set CPL to Ring-1.
4231 */
4232 pCtxCore->ss.Sel |= 1;
4233 if ( pCtxCore->cs.Sel
4234 && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
4235 pCtxCore->cs.Sel |= 1;
4236 }
4237 else
4238 {
4239#ifdef VBOX_WITH_RAW_RING1
4240 if ( EMIsRawRing1Enabled(pVM)
4241 && !pCtxCore->eflags.Bits.u1VM
4242 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
4243 {
4244 /* Set CPL to Ring-2. */
4245 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
4246 if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
4247 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
4248 }
4249#else
4250 AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
4251 ("ring-1 code not supported\n"));
4252#endif
4253 /*
4254 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
4255 */
4256 PATMRawEnter(pVM, pCtxCore);
4257 }
4258
4259 /*
4260 * Assert sanity.
4261 */
4262 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
4263 AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0,
4264 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
4265 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
4266
4267 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
4268
4269 pVCpu->cpum.s.fRawEntered = true;
4270 return VINF_SUCCESS;
4271}
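
/*
 * A small sketch of the selector transform performed above, with a
 * hypothetical flat ring-0 guest selector:
 *
 * @code
 *     uint16_t Sel = 0x0008;   // guest CS/SS with RPL=0
 *     Sel |= 1;                // raw mode executes it with RPL=1 -> 0x0009
 *     Sel &= ~X86_SEL_RPL;     // CPUMR3RawLeave reverses the change
 * @endcode
 */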
4272
4273
4274/**
4275 * Transforms the guest CPU state from raw-ring mode to correct values.
4276 *
4277 * This function will change any selector registers with RPL=1 to RPL=0.
4278 *
4279 * @returns Adjusted rc.
4280 * @param pVCpu Pointer to the VMCPU.
4281 * @param rc Raw mode return code
4282 * @param pCtxCore The context core (for trap usage).
4283 * @see @ref pg_raw
4284 */
4285VMMR3DECL(int) CPUMR3RawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
4286{
4287 PVM pVM = pVCpu->CTX_SUFF(pVM);
4288
4289 /*
4290 * Don't leave if we've already left (in GC).
4291 */
4292 Assert(pVCpu->cpum.s.fRawEntered);
4293 Assert(!pVCpu->cpum.s.fRemEntered);
4294 if (!pVCpu->cpum.s.fRawEntered)
4295 return rc;
4296 pVCpu->cpum.s.fRawEntered = false;
4297
4298 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
4299 if (!pCtxCore)
4300 pCtxCore = CPUMCTX2CORE(pCtx);
4301 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss.Sel & X86_SEL_RPL));
4302 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL),
4303 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
4304
4305 /*
4306 * Are we executing in raw ring-1?
4307 */
4308 if ( (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
4309 && !pCtxCore->eflags.Bits.u1VM)
4310 {
4311 /*
4312 * Leave execution mode.
4313 */
4314 PATMRawLeave(pVM, pCtxCore, rc);
4315 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
4316 /** @todo See what happens if we remove this. */
4317 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
4318 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
4319 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
4320 pCtxCore->es.Sel &= ~X86_SEL_RPL;
4321 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
4322 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
4323 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
4324 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
4325
4326 /*
4327 * Ring-1 selector => Ring-0.
4328 */
4329 pCtxCore->ss.Sel &= ~X86_SEL_RPL;
4330 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
4331 pCtxCore->cs.Sel &= ~X86_SEL_RPL;
4332 }
4333 else
4334 {
4335 /*
4336 * PATM is taking care of the IOPL and IF flags for us.
4337 */
4338 PATMRawLeave(pVM, pCtxCore, rc);
4339 if (!pCtxCore->eflags.Bits.u1VM)
4340 {
4341# ifdef VBOX_WITH_RAW_RING1
4342 if ( EMIsRawRing1Enabled(pVM)
4343 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)
4344 {
4345 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
4346 /** @todo See what happens if we remove this. */
4347 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 2)
4348 pCtxCore->ds.Sel = (pCtxCore->ds.Sel & ~X86_SEL_RPL) | 1;
4349 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 2)
4350 pCtxCore->es.Sel = (pCtxCore->es.Sel & ~X86_SEL_RPL) | 1;
4351 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 2)
4352 pCtxCore->fs.Sel = (pCtxCore->fs.Sel & ~X86_SEL_RPL) | 1;
4353 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 2)
4354 pCtxCore->gs.Sel = (pCtxCore->gs.Sel & ~X86_SEL_RPL) | 1;
4355
4356 /*
4357 * Ring-2 selector => Ring-1.
4358 */
4359 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 1;
4360 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 2)
4361 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 1;
4362 }
4363 else
4364 {
4365# endif
4366 /** @todo See what happens if we remove this. */
4367 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
4368 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
4369 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
4370 pCtxCore->es.Sel &= ~X86_SEL_RPL;
4371 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
4372 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
4373 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
4374 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
4375# ifdef VBOX_WITH_RAW_RING1
4376 }
4377# endif
4378 }
4379 }
4380
4381 return rc;
4382}
4383
4384#endif /* VBOX_WITH_RAW_MODE */
4385
4386
4387/**
4388 * Enters REM, gets and resets the changed flags (CPUM_CHANGED_*).
4389 *
4390 * Only REM should ever call this function!
4391 *
4392 * @returns The changed flags.
4393 * @param pVCpu Pointer to the VMCPU.
4394 * @param puCpl Where to return the current privilege level (CPL).
4395 */
4396VMMR3DECL(uint32_t) CPUMR3RemEnter(PVMCPU pVCpu, uint32_t *puCpl)
4397{
4398 Assert(!pVCpu->cpum.s.fRawEntered);
4399 Assert(!pVCpu->cpum.s.fRemEntered);
4400
4401 /*
4402 * Get the CPL first.
4403 */
4404 *puCpl = CPUMGetGuestCPL(pVCpu);
4405
4406 /*
4407 * Get and reset the flags.
4408 */
4409 uint32_t fFlags = pVCpu->cpum.s.fChanged;
4410 pVCpu->cpum.s.fChanged = 0;
4411
4412 /** @todo change the switcher to use the fChanged flags. */
4413 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
4414 {
4415 fFlags |= CPUM_CHANGED_FPU_REM;
4416 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
4417 }
4418
4419 pVCpu->cpum.s.fRemEntered = true;
4420 return fFlags;
4421}
4422
4423
4424/**
4425 * Leaves REM.
4426 *
4427 * @param pVCpu Pointer to the VMCPU.
4428 * @param fNoOutOfSyncSels This is @c false if there are out-of-sync
4429 * selector registers.
4430 */
4431VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels)
4432{
4433 Assert(!pVCpu->cpum.s.fRawEntered);
4434 Assert(pVCpu->cpum.s.fRemEntered);
4435
4436 pVCpu->cpum.s.fRemEntered = false;
4437}
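
/*
 * A hedged sketch of the REM enter/leave protocol; the resync action taken
 * on CPUM_CHANGED_FPU_REM is hypothetical:
 *
 * @code
 *     uint32_t uCpl;
 *     uint32_t fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
 *     if (fFlags & CPUM_CHANGED_FPU_REM)
 *         ; // resync the FPU state with the recompiler here
 *     CPUMR3RemLeave(pVCpu, true); // true = no out-of-sync selectors
 * @endcode
 */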
4438
4439
4440/**
4441 * Called when the ring-3 init phase completes.
4442 *
4443 * @returns VBox status code.
4444 * @param pVM Pointer to the VM.
4445 */
4446VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM)
4447{
4448 for (VMCPUID i = 0; i < pVM->cCpus; i++)
4449 {
4450 /* Cache the APIC base (from the APIC device) once it has been initialized. */
4451 PDMApicGetBase(&pVM->aCpus[i], &pVM->aCpus[i].cpum.s.Guest.msrApicBase);
4452 Log(("CPUMR3InitCompleted pVM=%p APIC base[%u]=%RX64\n", pVM, (unsigned)i, pVM->aCpus[i].cpum.s.Guest.msrApicBase));
4453 }
4454 return VINF_SUCCESS;
4455}
4456
4457/**
4458 * Called when the ring-0 init phase completes; logs the host and guest CPUID leaves.
4459 *
4460 * @param pVM Pointer to the VM.
4461 */
4462VMMR3DECL(void) CPUMR3LogCpuIds(PVM pVM)
4463{
4464 /*
4465 * Log the cpuid.
4466 */
4467 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
4468 RTCPUSET OnlineSet;
4469 LogRel(("Logical host processors: %u present, %u max, %u online, online mask: %016RX64\n",
4470 (unsigned)RTMpGetPresentCount(), (unsigned)RTMpGetCount(), (unsigned)RTMpGetOnlineCount(),
4471 RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
4472 LogRel(("************************* CPUID dump ************************\n"));
4473 DBGFR3Info(pVM->pUVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
4474 LogRel(("\n"));
4475 DBGFR3_INFO_LOG(pVM, "cpuid", "verbose"); /* macro */
4476 RTLogRelSetBuffering(fOldBuffered);
4477 LogRel(("******************** End of CPUID dump **********************\n"));
4478}