VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUM.cpp@ 54674

Last change on this file since 54674 was 54674, checked in by vboxsync, 10 years ago

CPUM: Working on refactoring the guest CPUID handling.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 136.1 KB
Line 
1/* $Id: CPUM.cpp 54674 2015-03-06 18:02:31Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_cpum CPUM - CPU Monitor / Manager
19 *
20 * The CPU Monitor / Manager keeps track of all the CPU registers. It is
21 * also responsible for lazy FPU handling and some of the context loading
22 * in raw mode.
23 *
24 * There are three CPU contexts, the most important one is the guest one (GC).
25 * When running in raw-mode (RC) there is a special hyper context for the VMM
26 * part that floats around inside the guest address space. When running in
27 * raw-mode, CPUM also maintains a host context for saving and restoring
28 * registers across world switches. This latter is done in cooperation with the
29 * world switcher (@see pg_vmm).
30 *
31 * @see grp_cpum
32 */
33
34/*******************************************************************************
35* Header Files *
36*******************************************************************************/
37#define LOG_GROUP LOG_GROUP_CPUM
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/cpumdis.h>
40#include <VBox/vmm/cpumctx-v1_6.h>
41#include <VBox/vmm/pgm.h>
42#include <VBox/vmm/pdmapi.h>
43#include <VBox/vmm/mm.h>
44#include <VBox/vmm/em.h>
45#include <VBox/vmm/selm.h>
46#include <VBox/vmm/dbgf.h>
47#include <VBox/vmm/patm.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/vmm/ssm.h>
50#include "CPUMInternal.h"
51#include <VBox/vmm/vm.h>
52
53#include <VBox/param.h>
54#include <VBox/dis.h>
55#include <VBox/err.h>
56#include <VBox/log.h>
57#include <iprt/asm-amd64-x86.h>
58#include <iprt/assert.h>
59#include <iprt/cpuset.h>
60#include <iprt/mem.h>
61#include <iprt/mp.h>
62#include <iprt/string.h>
63#include "internal/pgm.h"
64
65
66/*******************************************************************************
67* Defined Constants And Macros *
68*******************************************************************************/
/**
 * This was used in the saved state up to the early life of version 14.
 *
 * It indicates that we may have some out-of-sync hidden segment registers.
 * It is only relevant for raw-mode.
 */
#define CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID    RT_BIT(12)
76
77
78/*******************************************************************************
79* Structures and Typedefs *
80*******************************************************************************/
81
/**
 * What kind of cpu info dump to perform.
 */
typedef enum CPUMDUMPTYPE
{
    CPUMDUMPTYPE_TERSE,     /**< Terse dump. */
    CPUMDUMPTYPE_DEFAULT,   /**< Default level of detail. */
    CPUMDUMPTYPE_VERBOSE    /**< Verbose dump. */
} CPUMDUMPTYPE;
/** Pointer to a cpu info dump type. */
typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;
93
94
95/*******************************************************************************
96* Internal Functions *
97*******************************************************************************/
98static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
99static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
100static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM);
101static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
102static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
103static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
104static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
105static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
106static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
107static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
108static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
109
110
111/*******************************************************************************
112* Global Variables *
113*******************************************************************************/
/** Saved state field descriptors for CPUMCTX.
 * This is the current (field-by-field) layout; entries must stay in this
 * exact order since the SSM field list defines the on-disk format. */
static const SSMFIELD g_aCpumCtxFields[] =
{
    /* x87 FPU / SSE state (FXSAVE image members). */
    SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
    /* General purpose registers. */
    SSMFIELD_ENTRY( CPUMCTX, rdi),
    SSMFIELD_ENTRY( CPUMCTX, rsi),
    SSMFIELD_ENTRY( CPUMCTX, rbp),
    SSMFIELD_ENTRY( CPUMCTX, rax),
    SSMFIELD_ENTRY( CPUMCTX, rbx),
    SSMFIELD_ENTRY( CPUMCTX, rdx),
    SSMFIELD_ENTRY( CPUMCTX, rcx),
    SSMFIELD_ENTRY( CPUMCTX, rsp),
    SSMFIELD_ENTRY( CPUMCTX, rflags),
    SSMFIELD_ENTRY( CPUMCTX, rip),
    SSMFIELD_ENTRY( CPUMCTX, r8),
    SSMFIELD_ENTRY( CPUMCTX, r9),
    SSMFIELD_ENTRY( CPUMCTX, r10),
    SSMFIELD_ENTRY( CPUMCTX, r11),
    SSMFIELD_ENTRY( CPUMCTX, r12),
    SSMFIELD_ENTRY( CPUMCTX, r13),
    SSMFIELD_ENTRY( CPUMCTX, r14),
    SSMFIELD_ENTRY( CPUMCTX, r15),
    /* Segment registers, including the hidden parts. */
    SSMFIELD_ENTRY( CPUMCTX, es.Sel),
    SSMFIELD_ENTRY( CPUMCTX, es.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, es.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, es.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, es.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
    SSMFIELD_ENTRY( CPUMCTX, cs.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, cs.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, cs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
    SSMFIELD_ENTRY( CPUMCTX, ss.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, ss.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, ss.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
    SSMFIELD_ENTRY( CPUMCTX, ds.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, ds.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, ds.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
    SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
    SSMFIELD_ENTRY( CPUMCTX, fs.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, fs.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, fs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
    SSMFIELD_ENTRY( CPUMCTX, gs.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, gs.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, gs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
    /* Control registers. */
    SSMFIELD_ENTRY( CPUMCTX, cr0),
    SSMFIELD_ENTRY( CPUMCTX, cr2),
    SSMFIELD_ENTRY( CPUMCTX, cr3),
    SSMFIELD_ENTRY( CPUMCTX, cr4),
    /* Debug registers (DR4/DR5 are aliases and not saved). */
    SSMFIELD_ENTRY( CPUMCTX, dr[0]),
    SSMFIELD_ENTRY( CPUMCTX, dr[1]),
    SSMFIELD_ENTRY( CPUMCTX, dr[2]),
    SSMFIELD_ENTRY( CPUMCTX, dr[3]),
    SSMFIELD_ENTRY( CPUMCTX, dr[6]),
    SSMFIELD_ENTRY( CPUMCTX, dr[7]),
    /* Descriptor tables. */
    SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.pGdt),
    SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
    SSMFIELD_ENTRY( CPUMCTX, idtr.pIdt),
    /* SYSENTER state and MSRs. */
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
    SSMFIELD_ENTRY( CPUMCTX, msrEFER),
    SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrPAT),
    SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
    SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
    /* msrApicBase is not included here, it resides in the APIC device state. */
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
    SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
    SSMFIELD_ENTRY( CPUMCTX, tr.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, tr.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, tr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
    SSMFIELD_ENTRY_TERM()
};
246
/** Saved state field descriptors for CPUMCTX in V4.1 before the hidden selector
 * registers changed.
 * This mirrors the old raw-memory layout, so SSMFIELD_ENTRY_OLD entries mark
 * padding/obsolete bytes that must still be skipped when loading. */
static const SSMFIELD g_aCpumCtxFieldsMem[] =
{
    /* x87 FPU / SSE state (FXSAVE image members). */
    SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
    SSMFIELD_ENTRY_IGNORE( CPUMCTX, fpu.au32RsrvdRest),
    /* General purpose registers. */
    SSMFIELD_ENTRY( CPUMCTX, rdi),
    SSMFIELD_ENTRY( CPUMCTX, rsi),
    SSMFIELD_ENTRY( CPUMCTX, rbp),
    SSMFIELD_ENTRY( CPUMCTX, rax),
    SSMFIELD_ENTRY( CPUMCTX, rbx),
    SSMFIELD_ENTRY( CPUMCTX, rdx),
    SSMFIELD_ENTRY( CPUMCTX, rcx),
    SSMFIELD_ENTRY( CPUMCTX, rsp),
    /* Selector values; the hidden parts follow further down in this layout. */
    SSMFIELD_ENTRY_OLD( lss_esp, sizeof(uint32_t)),
    SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
    SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
    SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
    SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, es.Sel),
    SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
    SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
    SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
    SSMFIELD_ENTRY( CPUMCTX, rflags),
    SSMFIELD_ENTRY( CPUMCTX, rip),
    SSMFIELD_ENTRY( CPUMCTX, r8),
    SSMFIELD_ENTRY( CPUMCTX, r9),
    SSMFIELD_ENTRY( CPUMCTX, r10),
    SSMFIELD_ENTRY( CPUMCTX, r11),
    SSMFIELD_ENTRY( CPUMCTX, r12),
    SSMFIELD_ENTRY( CPUMCTX, r13),
    SSMFIELD_ENTRY( CPUMCTX, r14),
    SSMFIELD_ENTRY( CPUMCTX, r15),
    /* Hidden segment register parts (base/limit/attributes). */
    SSMFIELD_ENTRY( CPUMCTX, es.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, es.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ss.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ds.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
    SSMFIELD_ENTRY( CPUMCTX, fs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, gs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
    /* Control registers. */
    SSMFIELD_ENTRY( CPUMCTX, cr0),
    SSMFIELD_ENTRY( CPUMCTX, cr2),
    SSMFIELD_ENTRY( CPUMCTX, cr3),
    SSMFIELD_ENTRY( CPUMCTX, cr4),
    /* Debug registers; the old layout stored the DR4/DR5 aliases too. */
    SSMFIELD_ENTRY( CPUMCTX, dr[0]),
    SSMFIELD_ENTRY( CPUMCTX, dr[1]),
    SSMFIELD_ENTRY( CPUMCTX, dr[2]),
    SSMFIELD_ENTRY( CPUMCTX, dr[3]),
    SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, dr[6]),
    SSMFIELD_ENTRY( CPUMCTX, dr[7]),
    /* Descriptor tables. */
    SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.pGdt),
    SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
    SSMFIELD_ENTRY( CPUMCTX, idtr.pIdt),
    SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
    SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
    SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
    /* SYSENTER state and MSRs. */
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
    SSMFIELD_ENTRY( CPUMCTX, msrEFER),
    SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrPAT),
    SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
    SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
    /* LDTR and TR hidden parts. */
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
    SSMFIELD_ENTRY( CPUMCTX, tr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
    SSMFIELD_ENTRY_TERM()
};
377
/** Saved state field descriptors for CPUMCTX_VER1_6.
 * Legacy 1.6 layout; SSMFIELD_ENTRY_U32_ZX_U64 entries load a 32-bit value
 * and zero-extend it into the current 64-bit field. */
static const SSMFIELD g_aCpumCtxFieldsV16[] =
{
    /* x87 FPU / SSE state (FXSAVE image members). */
    SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
    SSMFIELD_ENTRY_IGNORE( CPUMCTX, fpu.au32RsrvdRest),
    /* General purpose registers. */
    SSMFIELD_ENTRY( CPUMCTX, rdi),
    SSMFIELD_ENTRY( CPUMCTX, rsi),
    SSMFIELD_ENTRY( CPUMCTX, rbp),
    SSMFIELD_ENTRY( CPUMCTX, rax),
    SSMFIELD_ENTRY( CPUMCTX, rbx),
    SSMFIELD_ENTRY( CPUMCTX, rdx),
    SSMFIELD_ENTRY( CPUMCTX, rcx),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, rsp),
    SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
    SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( CPUMCTX, sizeof(uint64_t) /*rsp_notused*/),
    SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
    SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
    SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, es.Sel),
    SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
    SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
    SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
    SSMFIELD_ENTRY( CPUMCTX, rflags),
    SSMFIELD_ENTRY( CPUMCTX, rip),
    SSMFIELD_ENTRY( CPUMCTX, r8),
    SSMFIELD_ENTRY( CPUMCTX, r9),
    SSMFIELD_ENTRY( CPUMCTX, r10),
    SSMFIELD_ENTRY( CPUMCTX, r11),
    SSMFIELD_ENTRY( CPUMCTX, r12),
    SSMFIELD_ENTRY( CPUMCTX, r13),
    SSMFIELD_ENTRY( CPUMCTX, r14),
    SSMFIELD_ENTRY( CPUMCTX, r15),
    /* Hidden segment register parts; bases were 32-bit in this version. */
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, es.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, es.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, cs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ss.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ds.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, fs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
    /* Control registers. */
    SSMFIELD_ENTRY( CPUMCTX, cr0),
    SSMFIELD_ENTRY( CPUMCTX, cr2),
    SSMFIELD_ENTRY( CPUMCTX, cr3),
    SSMFIELD_ENTRY( CPUMCTX, cr4),
    SSMFIELD_ENTRY_OLD( cr8, sizeof(uint64_t)),
    /* Debug registers; this version stored the DR4/DR5 aliases too. */
    SSMFIELD_ENTRY( CPUMCTX, dr[0]),
    SSMFIELD_ENTRY( CPUMCTX, dr[1]),
    SSMFIELD_ENTRY( CPUMCTX, dr[2]),
    SSMFIELD_ENTRY( CPUMCTX, dr[3]),
    SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, dr[6]),
    SSMFIELD_ENTRY( CPUMCTX, dr[7]),
    /* Descriptor tables. */
    SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gdtr.pGdt),
    SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( gdtrPadding64, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, idtr.pIdt),
    SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( idtrPadding64, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
    SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
    SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
    /* SYSENTER state and MSRs. */
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
    SSMFIELD_ENTRY( CPUMCTX, msrEFER),
    SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrPAT),
    SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
    SSMFIELD_ENTRY_OLD( msrFSBASE, sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( msrGSBASE, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
    /* LDTR and TR hidden parts. */
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ldtr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, tr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
    SSMFIELD_ENTRY_OLD( padding, sizeof(uint32_t)*2),
    SSMFIELD_ENTRY_TERM()
};
513
514
515/**
516 * Checks for partial/leaky FXSAVE/FXRSTOR handling on AMD CPUs.
517 *
518 * AMD K7, K8 and newer AMD CPUs do not save/restore the x87 error
519 * pointers (last instruction pointer, last data pointer, last opcode)
520 * except when the ES bit (Exception Summary) in x87 FSW (FPU Status
521 * Word) is set. Thus if we don't clear these registers there is
522 * potential, local FPU leakage from a process using the FPU to
523 * another.
524 *
525 * See AMD Instruction Reference for FXSAVE, FXRSTOR.
526 *
527 * @param pVM Pointer to the VM.
528 */
529static void cpumR3CheckLeakyFpu(PVM pVM)
530{
531 uint32_t u32CpuVersion = ASMCpuId_EAX(1);
532 uint32_t const u32Family = u32CpuVersion >> 8;
533 if ( u32Family >= 6 /* K7 and higher */
534 && ASMIsAmdCpu())
535 {
536 uint32_t cExt = ASMCpuId_EAX(0x80000000);
537 if (ASMIsValidExtRange(cExt))
538 {
539 uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
540 if (fExtFeaturesEDX & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
541 {
542 for (VMCPUID i = 0; i < pVM->cCpus; i++)
543 pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_FFXSR_LEAKY;
544 Log(("CPUMR3Init: host CPU has leaky fxsave/fxrstor behaviour\n"));
545 }
546 }
547 }
548}
549
550
551/**
552 * Initializes the CPUM.
553 *
554 * @returns VBox status code.
555 * @param pVM Pointer to the VM.
556 */
557VMMR3DECL(int) CPUMR3Init(PVM pVM)
558{
559 LogFlow(("CPUMR3Init\n"));
560
561 /*
562 * Assert alignment, sizes and tables.
563 */
564 AssertCompileMemberAlignment(VM, cpum.s, 32);
565 AssertCompile(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
566 AssertCompileSizeAlignment(CPUMCTX, 64);
567 AssertCompileSizeAlignment(CPUMCTXMSRS, 64);
568 AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
569 AssertCompileMemberAlignment(VM, cpum, 64);
570 AssertCompileMemberAlignment(VM, aCpus, 64);
571 AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
572 AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64);
573#ifdef VBOX_STRICT
574 int rc2 = cpumR3MsrStrictInitChecks();
575 AssertRCReturn(rc2, rc2);
576#endif
577
578 /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */
579 pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum);
580 Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.offCPUMCPU0 == (uintptr_t)&pVM->aCpus[0].cpum);
581
582
583 /* Calculate the offset from CPUMCPU to CPUM. */
584 for (VMCPUID i = 0; i < pVM->cCpus; i++)
585 {
586 PVMCPU pVCpu = &pVM->aCpus[i];
587
588 pVCpu->cpum.s.offCPUM = RT_OFFSETOF(VM, aCpus[i].cpum) - RT_OFFSETOF(VM, cpum);
589 Assert((uintptr_t)&pVCpu->cpum - pVCpu->cpum.s.offCPUM == (uintptr_t)&pVM->cpum);
590 }
591
592 /*
593 * Check that the CPU supports the minimum features we require.
594 */
595 if (!ASMHasCpuId())
596 {
597 Log(("The CPU doesn't support CPUID!\n"));
598 return VERR_UNSUPPORTED_CPU;
599 }
600 ASMCpuId_ECX_EDX(1, &pVM->cpum.s.CPUFeatures.ecx, &pVM->cpum.s.CPUFeatures.edx);
601 ASMCpuId_ECX_EDX(0x80000001, &pVM->cpum.s.CPUFeaturesExt.ecx, &pVM->cpum.s.CPUFeaturesExt.edx);
602
603 /* Setup the CR4 AND and OR masks used in the switcher */
604 /* Depends on the presence of FXSAVE(SSE) support on the host CPU */
605 if (!pVM->cpum.s.CPUFeatures.edx.u1FXSR)
606 {
607 Log(("The CPU doesn't support FXSAVE/FXRSTOR!\n"));
608 /* No FXSAVE implies no SSE */
609 pVM->cpum.s.CR4.AndMask = X86_CR4_PVI | X86_CR4_VME;
610 pVM->cpum.s.CR4.OrMask = 0;
611 }
612 else
613 {
614 pVM->cpum.s.CR4.AndMask = X86_CR4_OSXMMEEXCPT | X86_CR4_PVI | X86_CR4_VME;
615 pVM->cpum.s.CR4.OrMask = X86_CR4_OSFSXR;
616 }
617
618 if (!pVM->cpum.s.CPUFeatures.edx.u1MMX)
619 {
620 Log(("The CPU doesn't support MMX!\n"));
621 return VERR_UNSUPPORTED_CPU;
622 }
623 if (!pVM->cpum.s.CPUFeatures.edx.u1TSC)
624 {
625 Log(("The CPU doesn't support TSC!\n"));
626 return VERR_UNSUPPORTED_CPU;
627 }
628 /* Bogus on AMD? */
629 if (!pVM->cpum.s.CPUFeatures.edx.u1SEP)
630 Log(("The CPU doesn't support SYSENTER/SYSEXIT!\n"));
631
632 /*
633 * Gather info about the host CPU.
634 */
635 PCPUMCPUIDLEAF paLeaves;
636 uint32_t cLeaves;
637 int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
638 AssertLogRelRCReturn(rc, rc);
639
640 rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &pVM->cpum.s.HostFeatures);
641 RTMemFree(paLeaves);
642 AssertLogRelRCReturn(rc, rc);
643 pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;
644
645 /*
646 * Setup hypervisor startup values.
647 */
648
649 /*
650 * Register saved state data item.
651 */
652 rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
653 NULL, cpumR3LiveExec, NULL,
654 NULL, cpumR3SaveExec, NULL,
655 cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
656 if (RT_FAILURE(rc))
657 return rc;
658
659 /*
660 * Register info handlers and registers with the debugger facility.
661 */
662 DBGFR3InfoRegisterInternal(pVM, "cpum", "Displays the all the cpu states.", &cpumR3InfoAll);
663 DBGFR3InfoRegisterInternal(pVM, "cpumguest", "Displays the guest cpu state.", &cpumR3InfoGuest);
664 DBGFR3InfoRegisterInternal(pVM, "cpumhyper", "Displays the hypervisor cpu state.", &cpumR3InfoHyper);
665 DBGFR3InfoRegisterInternal(pVM, "cpumhost", "Displays the host cpu state.", &cpumR3InfoHost);
666 DBGFR3InfoRegisterInternal(pVM, "cpuid", "Displays the guest cpuid leaves.", &cpumR3CpuIdInfo);
667 DBGFR3InfoRegisterInternal(pVM, "cpumguestinstr", "Displays the current guest instruction.", &cpumR3InfoGuestInstr);
668
669 rc = cpumR3DbgInit(pVM);
670 if (RT_FAILURE(rc))
671 return rc;
672
673 /*
674 * Check if we need to workaround partial/leaky FPU handling.
675 */
676 cpumR3CheckLeakyFpu(pVM);
677
678 /*
679 * Initialize the Guest CPUID state.
680 */
681 rc = cpumR3CpuIdInit(pVM);
682 if (RT_FAILURE(rc))
683 return rc;
684 CPUMR3Reset(pVM);
685 return VINF_SUCCESS;
686}
687
688
689/**
690 * Applies relocations to data and code managed by this
691 * component. This function will be called at init and
692 * whenever the VMM need to relocate it self inside the GC.
693 *
694 * The CPUM will update the addresses used by the switcher.
695 *
696 * @param pVM The VM.
697 */
698VMMR3DECL(void) CPUMR3Relocate(PVM pVM)
699{
700 LogFlow(("CPUMR3Relocate\n"));
701
702 pVM->cpum.s.GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paMsrRangesR3);
703 pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
704
705 /* Recheck the guest DRx values in raw-mode. */
706 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
707 CPUMRecalcHyperDRx(&pVM->aCpus[iCpu], UINT8_MAX, false);
708}
709
710
711/**
712 * Apply late CPUM property changes based on the fHWVirtEx setting
713 *
714 * @param pVM Pointer to the VM.
715 * @param fHWVirtExEnabled HWVirtEx enabled/disabled
716 */
717VMMR3DECL(void) CPUMR3SetHWVirtEx(PVM pVM, bool fHWVirtExEnabled)
718{
719 /*
720 * Workaround for missing cpuid(0) patches when leaf 4 returns GuestCpuIdPatmDef:
721 * If we miss to patch a cpuid(0).eax then Linux tries to determine the number
722 * of processors from (cpuid(4).eax >> 26) + 1.
723 *
724 * Note: this code is obsolete, but let's keep it here for reference.
725 * Purpose is valid when we artificially cap the max std id to less than 4.
726 */
727 if (!fHWVirtExEnabled)
728 {
729 Assert( pVM->cpum.s.aGuestCpuIdPatmStd[4].eax == 0
730 || pVM->cpum.s.aGuestCpuIdPatmStd[0].eax < 0x4);
731 pVM->cpum.s.aGuestCpuIdPatmStd[4].eax = 0;
732 }
733}
734
735/**
736 * Terminates the CPUM.
737 *
738 * Termination means cleaning up and freeing all resources,
739 * the VM it self is at this point powered off or suspended.
740 *
741 * @returns VBox status code.
742 * @param pVM Pointer to the VM.
743 */
744VMMR3DECL(int) CPUMR3Term(PVM pVM)
745{
746#ifdef VBOX_WITH_CRASHDUMP_MAGIC
747 for (VMCPUID i = 0; i < pVM->cCpus; i++)
748 {
749 PVMCPU pVCpu = &pVM->aCpus[i];
750 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
751
752 memset(pVCpu->cpum.s.aMagic, 0, sizeof(pVCpu->cpum.s.aMagic));
753 pVCpu->cpum.s.uMagic = 0;
754 pCtx->dr[5] = 0;
755 }
756#else
757 NOREF(pVM);
758#endif
759 return VINF_SUCCESS;
760}
761
762
763/**
764 * Resets a virtual CPU.
765 *
766 * Used by CPUMR3Reset and CPU hot plugging.
767 *
768 * @param pVM Pointer to the cross context VM structure.
769 * @param pVCpu Pointer to the cross context virtual CPU structure of
770 * the CPU that is being reset. This may differ from the
771 * current EMT.
772 */
773VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
774{
775 /** @todo anything different for VCPU > 0? */
776 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
777
778 /*
779 * Initialize everything to ZERO first.
780 */
781 uint32_t fUseFlags = pVCpu->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM;
782 memset(pCtx, 0, sizeof(*pCtx));
783 pVCpu->cpum.s.fUseFlags = fUseFlags;
784
785 pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET; //0x60000010
786 pCtx->eip = 0x0000fff0;
787 pCtx->edx = 0x00000600; /* P6 processor */
788 pCtx->eflags.Bits.u1Reserved0 = 1;
789
790 pCtx->cs.Sel = 0xf000;
791 pCtx->cs.ValidSel = 0xf000;
792 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
793 pCtx->cs.u64Base = UINT64_C(0xffff0000);
794 pCtx->cs.u32Limit = 0x0000ffff;
795 pCtx->cs.Attr.n.u1DescType = 1; /* code/data segment */
796 pCtx->cs.Attr.n.u1Present = 1;
797 pCtx->cs.Attr.n.u4Type = X86_SEL_TYPE_ER_ACC;
798
799 pCtx->ds.fFlags = CPUMSELREG_FLAGS_VALID;
800 pCtx->ds.u32Limit = 0x0000ffff;
801 pCtx->ds.Attr.n.u1DescType = 1; /* code/data segment */
802 pCtx->ds.Attr.n.u1Present = 1;
803 pCtx->ds.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
804
805 pCtx->es.fFlags = CPUMSELREG_FLAGS_VALID;
806 pCtx->es.u32Limit = 0x0000ffff;
807 pCtx->es.Attr.n.u1DescType = 1; /* code/data segment */
808 pCtx->es.Attr.n.u1Present = 1;
809 pCtx->es.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
810
811 pCtx->fs.fFlags = CPUMSELREG_FLAGS_VALID;
812 pCtx->fs.u32Limit = 0x0000ffff;
813 pCtx->fs.Attr.n.u1DescType = 1; /* code/data segment */
814 pCtx->fs.Attr.n.u1Present = 1;
815 pCtx->fs.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
816
817 pCtx->gs.fFlags = CPUMSELREG_FLAGS_VALID;
818 pCtx->gs.u32Limit = 0x0000ffff;
819 pCtx->gs.Attr.n.u1DescType = 1; /* code/data segment */
820 pCtx->gs.Attr.n.u1Present = 1;
821 pCtx->gs.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
822
823 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
824 pCtx->ss.u32Limit = 0x0000ffff;
825 pCtx->ss.Attr.n.u1Present = 1;
826 pCtx->ss.Attr.n.u1DescType = 1; /* code/data segment */
827 pCtx->ss.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
828
829 pCtx->idtr.cbIdt = 0xffff;
830 pCtx->gdtr.cbGdt = 0xffff;
831
832 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
833 pCtx->ldtr.u32Limit = 0xffff;
834 pCtx->ldtr.Attr.n.u1Present = 1;
835 pCtx->ldtr.Attr.n.u4Type = X86_SEL_TYPE_SYS_LDT;
836
837 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
838 pCtx->tr.u32Limit = 0xffff;
839 pCtx->tr.Attr.n.u1Present = 1;
840 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY; /* Deduction, not properly documented by Intel. */
841
842 pCtx->dr[6] = X86_DR6_INIT_VAL;
843 pCtx->dr[7] = X86_DR7_INIT_VAL;
844
845 pCtx->fpu.FTW = 0x00; /* All empty (abbridged tag reg edition). */
846 pCtx->fpu.FCW = 0x37f;
847
848 /* Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3A, Table 8-1.
849 IA-32 Processor States Following Power-up, Reset, or INIT */
850 pCtx->fpu.MXCSR = 0x1F80;
851 pCtx->fpu.MXCSR_MASK = 0xffff; /** @todo REM always changed this for us. Should probably check if the HW really
852 supports all bits, since a zero value here should be read as 0xffbf. */
853
854 /*
855 * MSRs.
856 */
857 /* Init PAT MSR */
858 pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */
859
860 /* EFER MBZ; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State.
861 * The Intel docs don't mention it. */
862 Assert(!pCtx->msrEFER);
863
864 /* IA32_MISC_ENABLE - not entirely sure what the init/reset state really
865 is supposed to be here, just trying provide useful/sensible values. */
866 PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, MSR_IA32_MISC_ENABLE);
867 if (pRange)
868 {
869 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
870 | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL
871 | (pVM->cpum.s.GuestFeatures.fMonitorMWait ? MSR_IA32_MISC_ENABLE_MONITOR : 0)
872 | MSR_IA32_MISC_ENABLE_FAST_STRINGS;
873 pRange->fWrIgnMask |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
874 | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
875 pRange->fWrGpMask &= ~pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
876 }
877
878 /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */
879
880 /** @todo r=ramshankar: Currently broken for SMP as TMCpuTickSet() expects to be
881 * called from each EMT while we're getting called by CPUMR3Reset()
882 * iteratively on the same thread. Fix later. */
883#if 0 /** @todo r=bird: This we will do in TM, not here. */
884 /* TSC must be 0. Intel spec. Table 9-1. "IA-32 Processor States Following Power-up, Reset, or INIT." */
885 CPUMSetGuestMsr(pVCpu, MSR_IA32_TSC, 0);
886#endif
887
888
889 /* C-state control. Guesses. */
890 pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 1 /*C1*/ | RT_BIT_32(25) | RT_BIT_32(26) | RT_BIT_32(27) | RT_BIT_32(28);
891
892
893 /*
894 * Get the APIC base MSR from the APIC device. For historical reasons (saved state), the APIC base
895 * continues to reside in the APIC device and we cache it here in the VCPU for all further accesses.
896 */
897 PDMApicGetBase(pVCpu, &pCtx->msrApicBase);
898}
899
900
901/**
902 * Resets the CPU.
903 *
904 * @returns VINF_SUCCESS.
905 * @param pVM Pointer to the VM.
906 */
907VMMR3DECL(void) CPUMR3Reset(PVM pVM)
908{
909 for (VMCPUID i = 0; i < pVM->cCpus; i++)
910 {
911 CPUMR3ResetCpu(pVM, &pVM->aCpus[i]);
912
913#ifdef VBOX_WITH_CRASHDUMP_MAGIC
914 PCPUMCTX pCtx = &pVM->aCpus[i].cpum.s.Guest;
915
916 /* Magic marker for searching in crash dumps. */
917 strcpy((char *)pVM->aCpus[i].cpum.s.aMagic, "CPUMCPU Magic");
918 pVM->aCpus[i].cpum.s.uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
919 pCtx->dr[5] = UINT64_C(0xDEADBEEFDEADBEEF);
920#endif
921 }
922}
923
924
925
926
927/**
928 * Pass 0 live exec callback.
929 *
930 * @returns VINF_SSM_DONT_CALL_AGAIN.
931 * @param pVM Pointer to the VM.
932 * @param pSSM The saved state handle.
933 * @param uPass The pass (0).
934 */
935static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
936{
937 AssertReturn(uPass == 0, VERR_SSM_UNEXPECTED_PASS);
938 cpumR3SaveCpuId(pVM, pSSM);
939 return VINF_SSM_DONT_CALL_AGAIN;
940}
941
942
943/**
944 * Execute state save operation.
945 *
946 * @returns VBox status code.
947 * @param pVM Pointer to the VM.
948 * @param pSSM SSM operation handle.
949 */
950static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
951{
952 /*
953 * Save.
954 */
955 for (VMCPUID i = 0; i < pVM->cCpus; i++)
956 {
957 PVMCPU pVCpu = &pVM->aCpus[i];
958 SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), 0, g_aCpumCtxFields, NULL);
959 }
960
961 SSMR3PutU32(pSSM, pVM->cCpus);
962 SSMR3PutU32(pSSM, sizeof(pVM->aCpus[0].cpum.s.GuestMsrs.msr));
963 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
964 {
965 PVMCPU pVCpu = &pVM->aCpus[iCpu];
966
967 SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), 0, g_aCpumCtxFields, NULL);
968 SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags);
969 SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
970 AssertCompileSizeAlignment(pVCpu->cpum.s.GuestMsrs.msr, sizeof(uint64_t));
971 SSMR3PutMem(pSSM, &pVCpu->cpum.s.GuestMsrs, sizeof(pVCpu->cpum.s.GuestMsrs.msr));
972 }
973
974 cpumR3SaveCpuId(pVM, pSSM);
975 return VINF_SUCCESS;
976}
977
978
979/**
980 * @copydoc FNSSMINTLOADPREP
981 */
982static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
983{
984 NOREF(pSSM);
985 pVM->cpum.s.fPendingRestore = true;
986 return VINF_SUCCESS;
987}
988
989
990/**
991 * @copydoc FNSSMINTLOADEXEC
992 */
993static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
994{
995 /*
996 * Validate version.
997 */
998 if ( uVersion != CPUM_SAVED_STATE_VERSION
999 && uVersion != CPUM_SAVED_STATE_VERSION_MEM
1000 && uVersion != CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE
1001 && uVersion != CPUM_SAVED_STATE_VERSION_VER3_2
1002 && uVersion != CPUM_SAVED_STATE_VERSION_VER3_0
1003 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR
1004 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_0
1005 && uVersion != CPUM_SAVED_STATE_VERSION_VER1_6)
1006 {
1007 AssertMsgFailed(("cpumR3LoadExec: Invalid version uVersion=%d!\n", uVersion));
1008 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1009 }
1010
1011 if (uPass == SSM_PASS_FINAL)
1012 {
1013 /*
1014 * Set the size of RTGCPTR for SSMR3GetGCPtr. (Only necessary for
1015 * really old SSM file versions.)
1016 */
1017 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
1018 SSMR3HandleSetGCPtrSize(pSSM, sizeof(RTGCPTR32));
1019 else if (uVersion <= CPUM_SAVED_STATE_VERSION_VER3_0)
1020 SSMR3HandleSetGCPtrSize(pSSM, HC_ARCH_BITS == 32 ? sizeof(RTGCPTR32) : sizeof(RTGCPTR));
1021
1022 uint32_t const fLoad = uVersion > CPUM_SAVED_STATE_VERSION_MEM ? 0 : SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED;
1023 PCSSMFIELD paCpumCtxFields = g_aCpumCtxFields;
1024 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
1025 paCpumCtxFields = g_aCpumCtxFieldsV16;
1026 else if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
1027 paCpumCtxFields = g_aCpumCtxFieldsMem;
1028
1029 /*
1030 * Restore.
1031 */
1032 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1033 {
1034 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1035 uint64_t uCR3 = pVCpu->cpum.s.Hyper.cr3;
1036 uint64_t uRSP = pVCpu->cpum.s.Hyper.rsp; /* see VMMR3Relocate(). */
1037 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), fLoad, paCpumCtxFields, NULL);
1038 pVCpu->cpum.s.Hyper.cr3 = uCR3;
1039 pVCpu->cpum.s.Hyper.rsp = uRSP;
1040 }
1041
1042 if (uVersion >= CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR)
1043 {
1044 uint32_t cCpus;
1045 int rc = SSMR3GetU32(pSSM, &cCpus); AssertRCReturn(rc, rc);
1046 AssertLogRelMsgReturn(cCpus == pVM->cCpus, ("Mismatching CPU counts: saved: %u; configured: %u \n", cCpus, pVM->cCpus),
1047 VERR_SSM_UNEXPECTED_DATA);
1048 }
1049 AssertLogRelMsgReturn( uVersion > CPUM_SAVED_STATE_VERSION_VER2_0
1050 || pVM->cCpus == 1,
1051 ("cCpus=%u\n", pVM->cCpus),
1052 VERR_SSM_UNEXPECTED_DATA);
1053
1054 uint32_t cbMsrs = 0;
1055 if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
1056 {
1057 int rc = SSMR3GetU32(pSSM, &cbMsrs); AssertRCReturn(rc, rc);
1058 AssertLogRelMsgReturn(RT_ALIGN(cbMsrs, sizeof(uint64_t)) == cbMsrs, ("Size of MSRs is misaligned: %#x\n", cbMsrs),
1059 VERR_SSM_UNEXPECTED_DATA);
1060 AssertLogRelMsgReturn(cbMsrs <= sizeof(CPUMCTXMSRS) && cbMsrs > 0, ("Size of MSRs is out of range: %#x\n", cbMsrs),
1061 VERR_SSM_UNEXPECTED_DATA);
1062 }
1063
1064 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1065 {
1066 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1067 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), fLoad,
1068 paCpumCtxFields, NULL);
1069 SSMR3GetU32(pSSM, &pVCpu->cpum.s.fUseFlags);
1070 SSMR3GetU32(pSSM, &pVCpu->cpum.s.fChanged);
1071 if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
1072 SSMR3GetMem(pSSM, &pVCpu->cpum.s.GuestMsrs.au64[0], cbMsrs);
1073 else if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_0)
1074 {
1075 SSMR3GetMem(pSSM, &pVCpu->cpum.s.GuestMsrs.au64[0], 2 * sizeof(uint64_t)); /* Restore two MSRs. */
1076 SSMR3Skip(pSSM, 62 * sizeof(uint64_t));
1077 }
1078
1079 /* REM and other may have cleared must-be-one fields in DR6 and
1080 DR7, fix these. */
1081 pVCpu->cpum.s.Guest.dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
1082 pVCpu->cpum.s.Guest.dr[6] |= X86_DR6_RA1_MASK;
1083 pVCpu->cpum.s.Guest.dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
1084 pVCpu->cpum.s.Guest.dr[7] |= X86_DR7_RA1_MASK;
1085 }
1086
1087 /* Older states does not have the internal selector register flags
1088 and valid selector value. Supply those. */
1089 if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
1090 {
1091 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1092 {
1093 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1094 bool const fValid = HMIsEnabled(pVM)
1095 || ( uVersion > CPUM_SAVED_STATE_VERSION_VER3_2
1096 && !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID));
1097 PCPUMSELREG paSelReg = CPUMCTX_FIRST_SREG(&pVCpu->cpum.s.Guest);
1098 if (fValid)
1099 {
1100 for (uint32_t iSelReg = 0; iSelReg < X86_SREG_COUNT; iSelReg++)
1101 {
1102 paSelReg[iSelReg].fFlags = CPUMSELREG_FLAGS_VALID;
1103 paSelReg[iSelReg].ValidSel = paSelReg[iSelReg].Sel;
1104 }
1105
1106 pVCpu->cpum.s.Guest.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1107 pVCpu->cpum.s.Guest.ldtr.ValidSel = pVCpu->cpum.s.Guest.ldtr.Sel;
1108 }
1109 else
1110 {
1111 for (uint32_t iSelReg = 0; iSelReg < X86_SREG_COUNT; iSelReg++)
1112 {
1113 paSelReg[iSelReg].fFlags = 0;
1114 paSelReg[iSelReg].ValidSel = 0;
1115 }
1116
1117 /* This might not be 104% correct, but I think it's close
1118 enough for all practical purposes... (REM always loaded
1119 LDTR registers.) */
1120 pVCpu->cpum.s.Guest.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1121 pVCpu->cpum.s.Guest.ldtr.ValidSel = pVCpu->cpum.s.Guest.ldtr.Sel;
1122 }
1123 pVCpu->cpum.s.Guest.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1124 pVCpu->cpum.s.Guest.tr.ValidSel = pVCpu->cpum.s.Guest.tr.Sel;
1125 }
1126 }
1127
1128 /* Clear CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID. */
1129 if ( uVersion > CPUM_SAVED_STATE_VERSION_VER3_2
1130 && uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
1131 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1132 pVM->aCpus[iCpu].cpum.s.fChanged &= CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;
1133
1134 /*
1135 * A quick sanity check.
1136 */
1137 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1138 {
1139 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1140 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.es.fFlags & !CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1141 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.cs.fFlags & !CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1142 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.ss.fFlags & !CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1143 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.ds.fFlags & !CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1144 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.fs.fFlags & !CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1145 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.gs.fFlags & !CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1146 }
1147 }
1148
1149 pVM->cpum.s.fPendingRestore = false;
1150
1151 /*
1152 * Guest CPUIDs.
1153 */
1154 if (uVersion > CPUM_SAVED_STATE_VERSION_VER3_0)
1155 return cpumR3LoadCpuId(pVM, pSSM, uVersion);
1156
1157 /** @todo Merge the code below into cpumR3LoadCpuId when we've found out what is
1158 * actually required. */
1159
1160 /*
1161 * Restore the CPUID leaves.
1162 *
1163 * Note that we support restoring less than the current amount of standard
1164 * leaves because we've been allowed more is newer version of VBox.
1165 */
1166 uint32_t cElements;
1167 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1168 if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmStd))
1169 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1170 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdPatmStd[0]));
1171
1172 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1173 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmExt))
1174 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1175 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmExt[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmExt));
1176
1177 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1178 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmCentaur))
1179 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1180 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmCentaur));
1181
1182 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestCpuIdPatmDef, sizeof(pVM->cpum.s.GuestCpuIdPatmDef));
1183
1184 /*
1185 * Check that the basic cpuid id information is unchanged.
1186 */
1187 /** @todo we should check the 64 bits capabilities too! */
1188 uint32_t au32CpuId[8] = {0,0,0,0, 0,0,0,0};
1189 ASMCpuIdExSlow(0, 0, 0, 0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
1190 ASMCpuIdExSlow(1, 0, 0, 0, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
1191 uint32_t au32CpuIdSaved[8];
1192 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
1193 if (RT_SUCCESS(rc))
1194 {
1195 /* Ignore CPU stepping. */
1196 au32CpuId[4] &= 0xfffffff0;
1197 au32CpuIdSaved[4] &= 0xfffffff0;
1198
1199 /* Ignore APIC ID (AMD specs). */
1200 au32CpuId[5] &= ~0xff000000;
1201 au32CpuIdSaved[5] &= ~0xff000000;
1202
1203 /* Ignore the number of Logical CPUs (AMD specs). */
1204 au32CpuId[5] &= ~0x00ff0000;
1205 au32CpuIdSaved[5] &= ~0x00ff0000;
1206
1207 /* Ignore some advanced capability bits, that we don't expose to the guest. */
1208 au32CpuId[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
1209 | X86_CPUID_FEATURE_ECX_VMX
1210 | X86_CPUID_FEATURE_ECX_SMX
1211 | X86_CPUID_FEATURE_ECX_EST
1212 | X86_CPUID_FEATURE_ECX_TM2
1213 | X86_CPUID_FEATURE_ECX_CNTXID
1214 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1215 | X86_CPUID_FEATURE_ECX_PDCM
1216 | X86_CPUID_FEATURE_ECX_DCA
1217 | X86_CPUID_FEATURE_ECX_X2APIC
1218 );
1219 au32CpuIdSaved[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
1220 | X86_CPUID_FEATURE_ECX_VMX
1221 | X86_CPUID_FEATURE_ECX_SMX
1222 | X86_CPUID_FEATURE_ECX_EST
1223 | X86_CPUID_FEATURE_ECX_TM2
1224 | X86_CPUID_FEATURE_ECX_CNTXID
1225 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1226 | X86_CPUID_FEATURE_ECX_PDCM
1227 | X86_CPUID_FEATURE_ECX_DCA
1228 | X86_CPUID_FEATURE_ECX_X2APIC
1229 );
1230
1231 /* Make sure we don't forget to update the masks when enabling
1232 * features in the future.
1233 */
1234 AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx &
1235 ( X86_CPUID_FEATURE_ECX_DTES64
1236 | X86_CPUID_FEATURE_ECX_VMX
1237 | X86_CPUID_FEATURE_ECX_SMX
1238 | X86_CPUID_FEATURE_ECX_EST
1239 | X86_CPUID_FEATURE_ECX_TM2
1240 | X86_CPUID_FEATURE_ECX_CNTXID
1241 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1242 | X86_CPUID_FEATURE_ECX_PDCM
1243 | X86_CPUID_FEATURE_ECX_DCA
1244 | X86_CPUID_FEATURE_ECX_X2APIC
1245 )));
1246 /* do the compare */
1247 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
1248 {
1249 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
1250 LogRel(("cpumR3LoadExec: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
1251 "Saved=%.*Rhxs\n"
1252 "Real =%.*Rhxs\n",
1253 sizeof(au32CpuIdSaved), au32CpuIdSaved,
1254 sizeof(au32CpuId), au32CpuId));
1255 else
1256 {
1257 LogRel(("cpumR3LoadExec: CpuId mismatch!\n"
1258 "Saved=%.*Rhxs\n"
1259 "Real =%.*Rhxs\n",
1260 sizeof(au32CpuIdSaved), au32CpuIdSaved,
1261 sizeof(au32CpuId), au32CpuId));
1262 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
1263 }
1264 }
1265 }
1266
1267 return rc;
1268}
1269
1270
1271/**
1272 * @copydoc FNSSMINTLOADPREP
1273 */
1274static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
1275{
1276 if (RT_FAILURE(SSMR3HandleGetStatus(pSSM)))
1277 return VINF_SUCCESS;
1278
1279 /* just check this since we can. */ /** @todo Add a SSM unit flag for indicating that it's mandatory during a restore. */
1280 if (pVM->cpum.s.fPendingRestore)
1281 {
1282 LogRel(("CPUM: Missing state!\n"));
1283 return VERR_INTERNAL_ERROR_2;
1284 }
1285
1286 bool const fSupportsLongMode = VMR3IsLongModeAllowed(pVM);
1287 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1288 {
1289 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1290
1291 /* Notify PGM of the NXE states in case they've changed. */
1292 PGMNotifyNxeChanged(pVCpu, RT_BOOL(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
1293
1294 /* Cache the local APIC base from the APIC device. During init. this is done in CPUMR3ResetCpu(). */
1295 PDMApicGetBase(pVCpu, &pVCpu->cpum.s.Guest.msrApicBase);
1296
1297 /* During init. this is done in CPUMR3InitCompleted(). */
1298 if (fSupportsLongMode)
1299 pVCpu->cpum.s.fUseFlags |= CPUM_USE_SUPPORTS_LONGMODE;
1300 }
1301 return VINF_SUCCESS;
1302}
1303
1304
1305/**
1306 * Checks if the CPUM state restore is still pending.
1307 *
1308 * @returns true / false.
1309 * @param pVM Pointer to the VM.
1310 */
1311VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM)
1312{
1313 return pVM->cpum.s.fPendingRestore;
1314}
1315
1316
1317/**
1318 * Formats the EFLAGS value into mnemonics.
1319 *
1320 * @param pszEFlags Where to write the mnemonics. (Assumes sufficient buffer space.)
1321 * @param efl The EFLAGS value.
1322 */
1323static void cpumR3InfoFormatFlags(char *pszEFlags, uint32_t efl)
1324{
1325 /*
1326 * Format the flags.
1327 */
1328 static const struct
1329 {
1330 const char *pszSet; const char *pszClear; uint32_t fFlag;
1331 } s_aFlags[] =
1332 {
1333 { "vip",NULL, X86_EFL_VIP },
1334 { "vif",NULL, X86_EFL_VIF },
1335 { "ac", NULL, X86_EFL_AC },
1336 { "vm", NULL, X86_EFL_VM },
1337 { "rf", NULL, X86_EFL_RF },
1338 { "nt", NULL, X86_EFL_NT },
1339 { "ov", "nv", X86_EFL_OF },
1340 { "dn", "up", X86_EFL_DF },
1341 { "ei", "di", X86_EFL_IF },
1342 { "tf", NULL, X86_EFL_TF },
1343 { "nt", "pl", X86_EFL_SF },
1344 { "nz", "zr", X86_EFL_ZF },
1345 { "ac", "na", X86_EFL_AF },
1346 { "po", "pe", X86_EFL_PF },
1347 { "cy", "nc", X86_EFL_CF },
1348 };
1349 char *psz = pszEFlags;
1350 for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
1351 {
1352 const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
1353 if (pszAdd)
1354 {
1355 strcpy(psz, pszAdd);
1356 psz += strlen(pszAdd);
1357 *psz++ = ' ';
1358 }
1359 }
1360 psz[-1] = '\0';
1361}
1362
1363
1364/**
1365 * Formats a full register dump.
1366 *
1367 * @param pVM Pointer to the VM.
1368 * @param pCtx The context to format.
1369 * @param pCtxCore The context core to format.
1370 * @param pHlp Output functions.
1371 * @param enmType The dump type.
1372 * @param pszPrefix Register name prefix.
1373 */
1374static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCCPUMCTXCORE pCtxCore, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType,
1375 const char *pszPrefix)
1376{
1377 NOREF(pVM);
1378
1379 /*
1380 * Format the EFLAGS.
1381 */
1382 uint32_t efl = pCtxCore->eflags.u32;
1383 char szEFlags[80];
1384 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1385
1386 /*
1387 * Format the registers.
1388 */
1389 switch (enmType)
1390 {
1391 case CPUMDUMPTYPE_TERSE:
1392 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1393 pHlp->pfnPrintf(pHlp,
1394 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1395 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1396 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1397 "%sr14=%016RX64 %sr15=%016RX64\n"
1398 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1399 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
1400 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1401 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1402 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1403 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1404 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
1405 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, efl);
1406 else
1407 pHlp->pfnPrintf(pHlp,
1408 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1409 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1410 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
1411 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1412 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1413 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
1414 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, efl);
1415 break;
1416
1417 case CPUMDUMPTYPE_DEFAULT:
1418 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1419 pHlp->pfnPrintf(pHlp,
1420 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1421 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1422 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1423 "%sr14=%016RX64 %sr15=%016RX64\n"
1424 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1425 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
1426 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%016RX64:%04x %sldtr=%04x\n"
1427 ,
1428 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1429 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1430 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1431 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1432 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
1433 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
1434 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1435 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
1436 else
1437 pHlp->pfnPrintf(pHlp,
1438 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1439 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1440 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
1441 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%08RX64:%04x %sldtr=%04x\n"
1442 ,
1443 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1444 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1445 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
1446 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
1447 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1448 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
1449 break;
1450
1451 case CPUMDUMPTYPE_VERBOSE:
1452 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1453 pHlp->pfnPrintf(pHlp,
1454 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1455 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1456 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1457 "%sr14=%016RX64 %sr15=%016RX64\n"
1458 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1459 "%scs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1460 "%sds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1461 "%ses={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1462 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1463 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1464 "%sss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1465 "%scr0=%016RX64 %scr2=%016RX64 %scr3=%016RX64 %scr4=%016RX64\n"
1466 "%sdr0=%016RX64 %sdr1=%016RX64 %sdr2=%016RX64 %sdr3=%016RX64\n"
1467 "%sdr4=%016RX64 %sdr5=%016RX64 %sdr6=%016RX64 %sdr7=%016RX64\n"
1468 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
1469 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1470 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1471 "%sSysEnter={cs=%04llx eip=%016RX64 esp=%016RX64}\n"
1472 ,
1473 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1474 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1475 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1476 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1477 pszPrefix, pCtxCore->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u,
1478 pszPrefix, pCtxCore->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u,
1479 pszPrefix, pCtxCore->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u,
1480 pszPrefix, pCtxCore->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u,
1481 pszPrefix, pCtxCore->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u,
1482 pszPrefix, pCtxCore->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u,
1483 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1484 pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1], pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
1485 pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5], pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
1486 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1487 pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
1488 pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
1489 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1490 else
1491 pHlp->pfnPrintf(pHlp,
1492 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1493 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1494 "%scs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr0=%08RX64 %sdr1=%08RX64\n"
1495 "%sds={%04x base=%016RX64 limit=%08x flags=%08x} %sdr2=%08RX64 %sdr3=%08RX64\n"
1496 "%ses={%04x base=%016RX64 limit=%08x flags=%08x} %sdr4=%08RX64 %sdr5=%08RX64\n"
1497 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr6=%08RX64 %sdr7=%08RX64\n"
1498 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x} %scr0=%08RX64 %scr2=%08RX64\n"
1499 "%sss={%04x base=%016RX64 limit=%08x flags=%08x} %scr3=%08RX64 %scr4=%08RX64\n"
1500 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
1501 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1502 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1503 "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1504 ,
1505 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1506 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1507 pszPrefix, pCtxCore->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u, pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1],
1508 pszPrefix, pCtxCore->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u, pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
1509 pszPrefix, pCtxCore->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u, pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5],
1510 pszPrefix, pCtxCore->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u, pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
1511 pszPrefix, pCtxCore->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u, pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2,
1512 pszPrefix, pCtxCore->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1513 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1514 pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
1515 pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
1516 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1517
1518 pHlp->pfnPrintf(pHlp,
1519 "%sFCW=%04x %sFSW=%04x %sFTW=%04x %sFOP=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n"
1520 "%sFPUIP=%08x %sCS=%04x %sRsrvd1=%04x %sFPUDP=%08x %sDS=%04x %sRsvrd2=%04x\n"
1521 ,
1522 pszPrefix, pCtx->fpu.FCW, pszPrefix, pCtx->fpu.FSW, pszPrefix, pCtx->fpu.FTW, pszPrefix, pCtx->fpu.FOP,
1523 pszPrefix, pCtx->fpu.MXCSR, pszPrefix, pCtx->fpu.MXCSR_MASK,
1524 pszPrefix, pCtx->fpu.FPUIP, pszPrefix, pCtx->fpu.CS, pszPrefix, pCtx->fpu.Rsrvd1,
1525 pszPrefix, pCtx->fpu.FPUDP, pszPrefix, pCtx->fpu.DS, pszPrefix, pCtx->fpu.Rsrvd2
1526 );
1527 unsigned iShift = (pCtx->fpu.FSW >> 11) & 7;
1528 for (unsigned iST = 0; iST < RT_ELEMENTS(pCtx->fpu.aRegs); iST++)
1529 {
1530 unsigned iFPR = (iST + iShift) % RT_ELEMENTS(pCtx->fpu.aRegs);
1531 unsigned uTag = pCtx->fpu.FTW & (1 << iFPR) ? 1 : 0;
1532 char chSign = pCtx->fpu.aRegs[0].au16[4] & 0x8000 ? '-' : '+';
1533 unsigned iInteger = (unsigned)(pCtx->fpu.aRegs[0].au64[0] >> 63);
1534 uint64_t u64Fraction = pCtx->fpu.aRegs[0].au64[0] & UINT64_C(0x7fffffffffffffff);
1535 unsigned uExponent = pCtx->fpu.aRegs[0].au16[4] & 0x7fff;
1536 /** @todo This isn't entirenly correct and needs more work! */
1537 pHlp->pfnPrintf(pHlp,
1538 "%sST(%u)=%sFPR%u={%04RX16'%08RX32'%08RX32} t%d %c%u.%022llu ^ %u",
1539 pszPrefix, iST, pszPrefix, iFPR,
1540 pCtx->fpu.aRegs[0].au16[4], pCtx->fpu.aRegs[0].au32[1], pCtx->fpu.aRegs[0].au32[0],
1541 uTag, chSign, iInteger, u64Fraction, uExponent);
1542 if (pCtx->fpu.aRegs[0].au16[5] || pCtx->fpu.aRegs[0].au16[6] || pCtx->fpu.aRegs[0].au16[7])
1543 pHlp->pfnPrintf(pHlp, " res={%04RX16,%04RX16,%04RX16}\n",
1544 pCtx->fpu.aRegs[0].au16[5], pCtx->fpu.aRegs[0].au16[6], pCtx->fpu.aRegs[0].au16[7]);
1545 else
1546 pHlp->pfnPrintf(pHlp, "\n");
1547 }
1548 for (unsigned iXMM = 0; iXMM < RT_ELEMENTS(pCtx->fpu.aXMM); iXMM++)
1549 pHlp->pfnPrintf(pHlp,
1550 iXMM & 1
1551 ? "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32\n"
1552 : "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32 ",
1553 pszPrefix, iXMM, iXMM < 10 ? " " : "",
1554 pCtx->fpu.aXMM[iXMM].au32[3],
1555 pCtx->fpu.aXMM[iXMM].au32[2],
1556 pCtx->fpu.aXMM[iXMM].au32[1],
1557 pCtx->fpu.aXMM[iXMM].au32[0]);
1558 for (unsigned i = 0; i < RT_ELEMENTS(pCtx->fpu.au32RsrvdRest); i++)
1559 if (pCtx->fpu.au32RsrvdRest[i])
1560 pHlp->pfnPrintf(pHlp, "%sRsrvdRest[i]=%RX32 (offset=%#x)\n",
1561 pszPrefix, i, pCtx->fpu.au32RsrvdRest[i], RT_OFFSETOF(X86FXSTATE, au32RsrvdRest[i]) );
1562
1563 pHlp->pfnPrintf(pHlp,
1564 "%sEFER =%016RX64\n"
1565 "%sPAT =%016RX64\n"
1566 "%sSTAR =%016RX64\n"
1567 "%sCSTAR =%016RX64\n"
1568 "%sLSTAR =%016RX64\n"
1569 "%sSFMASK =%016RX64\n"
1570 "%sKERNELGSBASE =%016RX64\n",
1571 pszPrefix, pCtx->msrEFER,
1572 pszPrefix, pCtx->msrPAT,
1573 pszPrefix, pCtx->msrSTAR,
1574 pszPrefix, pCtx->msrCSTAR,
1575 pszPrefix, pCtx->msrLSTAR,
1576 pszPrefix, pCtx->msrSFMASK,
1577 pszPrefix, pCtx->msrKERNELGSBASE);
1578 break;
1579 }
1580}
1581
1582
1583/**
1584 * Display all cpu states and any other cpum info.
1585 *
1586 * @param pVM Pointer to the VM.
1587 * @param pHlp The info helper functions.
1588 * @param pszArgs Arguments, ignored.
1589 */
1590static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1591{
1592 cpumR3InfoGuest(pVM, pHlp, pszArgs);
1593 cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);
1594 cpumR3InfoHyper(pVM, pHlp, pszArgs);
1595 cpumR3InfoHost(pVM, pHlp, pszArgs);
1596}
1597
1598
1599/**
1600 * Parses the info argument.
1601 *
1602 * The argument starts with 'verbose', 'terse' or 'default' and then
1603 * continues with the comment string.
1604 *
1605 * @param pszArgs The pointer to the argument string.
1606 * @param penmType Where to store the dump type request.
1607 * @param ppszComment Where to store the pointer to the comment string.
1608 */
1609static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
1610{
1611 if (!pszArgs)
1612 {
1613 *penmType = CPUMDUMPTYPE_DEFAULT;
1614 *ppszComment = "";
1615 }
1616 else
1617 {
1618 if (!strncmp(pszArgs, RT_STR_TUPLE("verbose")))
1619 {
1620 pszArgs += 7;
1621 *penmType = CPUMDUMPTYPE_VERBOSE;
1622 }
1623 else if (!strncmp(pszArgs, RT_STR_TUPLE("terse")))
1624 {
1625 pszArgs += 5;
1626 *penmType = CPUMDUMPTYPE_TERSE;
1627 }
1628 else if (!strncmp(pszArgs, RT_STR_TUPLE("default")))
1629 {
1630 pszArgs += 7;
1631 *penmType = CPUMDUMPTYPE_DEFAULT;
1632 }
1633 else
1634 *penmType = CPUMDUMPTYPE_DEFAULT;
1635 *ppszComment = RTStrStripL(pszArgs);
1636 }
1637}
1638
1639
1640/**
1641 * Display the guest cpu state.
1642 *
1643 * @param pVM Pointer to the VM.
1644 * @param pHlp The info helper functions.
1645 * @param pszArgs Arguments, ignored.
1646 */
1647static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1648{
1649 CPUMDUMPTYPE enmType;
1650 const char *pszComment;
1651 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1652
1653 /* @todo SMP support! */
1654 PVMCPU pVCpu = VMMGetCpu(pVM);
1655 if (!pVCpu)
1656 pVCpu = &pVM->aCpus[0];
1657
1658 pHlp->pfnPrintf(pHlp, "Guest CPUM (VCPU %d) state: %s\n", pVCpu->idCpu, pszComment);
1659
1660 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1661 cpumR3InfoOne(pVM, pCtx, CPUMCTX2CORE(pCtx), pHlp, enmType, "");
1662}
1663
1664
1665/**
1666 * Display the current guest instruction
1667 *
1668 * @param pVM Pointer to the VM.
1669 * @param pHlp The info helper functions.
1670 * @param pszArgs Arguments, ignored.
1671 */
1672static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1673{
1674 NOREF(pszArgs);
1675
1676 /** @todo SMP support! */
1677 PVMCPU pVCpu = VMMGetCpu(pVM);
1678 if (!pVCpu)
1679 pVCpu = &pVM->aCpus[0];
1680
1681 char szInstruction[256];
1682 szInstruction[0] = '\0';
1683 DBGFR3DisasInstrCurrent(pVCpu, szInstruction, sizeof(szInstruction));
1684 pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
1685}
1686
1687
1688/**
1689 * Display the hypervisor cpu state.
1690 *
1691 * @param pVM Pointer to the VM.
1692 * @param pHlp The info helper functions.
1693 * @param pszArgs Arguments, ignored.
1694 */
1695static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1696{
1697 CPUMDUMPTYPE enmType;
1698 const char *pszComment;
1699 /* @todo SMP */
1700 PVMCPU pVCpu = &pVM->aCpus[0];
1701
1702 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1703 pHlp->pfnPrintf(pHlp, "Hypervisor CPUM state: %s\n", pszComment);
1704 cpumR3InfoOne(pVM, &pVCpu->cpum.s.Hyper, CPUMCTX2CORE(&pVCpu->cpum.s.Hyper), pHlp, enmType, ".");
1705 pHlp->pfnPrintf(pHlp, "CR4OrMask=%#x CR4AndMask=%#x\n", pVM->cpum.s.CR4.OrMask, pVM->cpum.s.CR4.AndMask);
1706}
1707
1708
1709/**
1710 * Display the host cpu state.
1711 *
1712 * @param pVM Pointer to the VM.
1713 * @param pHlp The info helper functions.
1714 * @param pszArgs Arguments, ignored.
1715 */
1716static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1717{
1718 CPUMDUMPTYPE enmType;
1719 const char *pszComment;
1720 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1721 pHlp->pfnPrintf(pHlp, "Host CPUM state: %s\n", pszComment);
1722
1723 /*
1724 * Format the EFLAGS.
1725 */
1726 /* @todo SMP */
1727 PCPUMHOSTCTX pCtx = &pVM->aCpus[0].cpum.s.Host;
1728#if HC_ARCH_BITS == 32
1729 uint32_t efl = pCtx->eflags.u32;
1730#else
1731 uint64_t efl = pCtx->rflags;
1732#endif
1733 char szEFlags[80];
1734 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1735
1736 /*
1737 * Format the registers.
1738 */
1739#if HC_ARCH_BITS == 32
1740# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1741 if (!(pCtx->efer & MSR_K6_EFER_LMA))
1742# endif
1743 {
1744 pHlp->pfnPrintf(pHlp,
1745 "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
1746 "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
1747 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
1748 "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
1749 "dr[0]=%08RX64 dr[1]=%08RX64x dr[2]=%08RX64 dr[3]=%08RX64x dr[6]=%08RX64 dr[7]=%08RX64\n"
1750 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1751 ,
1752 /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
1753 /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
1754 pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
1755 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
1756 pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
1757 (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->ldtr,
1758 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1759 }
1760# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1761 else
1762# endif
1763#endif
1764#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1765 {
1766 pHlp->pfnPrintf(pHlp,
1767 "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
1768 "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
1769 "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
1770 " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
1771 "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1772 "r14=%016RX64 r15=%016RX64\n"
1773 "iopl=%d %31s\n"
1774 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
1775 "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
1776 "cr4=%016RX64 ldtr=%04x tr=%04x\n"
1777 "dr[0]=%016RX64 dr[1]=%016RX64 dr[2]=%016RX64\n"
1778 "dr[3]=%016RX64 dr[6]=%016RX64 dr[7]=%016RX64\n"
1779 "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
1780 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1781 "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
1782 ,
1783 /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
1784 pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
1785 /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
1786 /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
1787 pCtx->r11, pCtx->r12, pCtx->r13,
1788 pCtx->r14, pCtx->r15,
1789 X86_EFL_GET_IOPL(efl), szEFlags,
1790 pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
1791 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
1792 pCtx->cr4, pCtx->ldtr, pCtx->tr,
1793 pCtx->dr0, pCtx->dr1, pCtx->dr2,
1794 pCtx->dr3, pCtx->dr6, pCtx->dr7,
1795 pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
1796 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
1797 pCtx->FSbase, pCtx->GSbase, pCtx->efer);
1798 }
1799#endif
1800}
1801
1802
1803/**
1804 * Get L1 cache / TLS associativity.
1805 */
1806static const char *getCacheAss(unsigned u, char *pszBuf)
1807{
1808 if (u == 0)
1809 return "res0 ";
1810 if (u == 1)
1811 return "direct";
1812 if (u == 255)
1813 return "fully";
1814 if (u >= 256)
1815 return "???";
1816
1817 RTStrPrintf(pszBuf, 16, "%d way", u);
1818 return pszBuf;
1819}
1820
1821
1822/**
1823 * Get L2 cache associativity.
1824 */
1825const char *getL2CacheAss(unsigned u)
1826{
1827 switch (u)
1828 {
1829 case 0: return "off ";
1830 case 1: return "direct";
1831 case 2: return "2 way ";
1832 case 3: return "res3 ";
1833 case 4: return "4 way ";
1834 case 5: return "res5 ";
1835 case 6: return "8 way ";
1836 case 7: return "res7 ";
1837 case 8: return "16 way";
1838 case 9: return "res9 ";
1839 case 10: return "res10 ";
1840 case 11: return "res11 ";
1841 case 12: return "res12 ";
1842 case 13: return "res13 ";
1843 case 14: return "res14 ";
1844 case 15: return "fully ";
1845 default: return "????";
1846 }
1847}
1848
1849
1850/**
1851 * Display the guest CpuId leaves.
1852 *
1853 * @param pVM Pointer to the VM.
1854 * @param pHlp The info helper functions.
1855 * @param pszArgs "terse", "default" or "verbose".
1856 */
1857static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1858{
1859 /*
1860 * Parse the argument.
1861 */
1862 unsigned iVerbosity = 1;
1863 if (pszArgs)
1864 {
1865 pszArgs = RTStrStripL(pszArgs);
1866 if (!strcmp(pszArgs, "terse"))
1867 iVerbosity--;
1868 else if (!strcmp(pszArgs, "verbose"))
1869 iVerbosity++;
1870 }
1871
1872 /*
1873 * Start cracking.
1874 */
1875 CPUMCPUID Host;
1876 CPUMCPUID Guest;
1877 unsigned cStdMax = pVM->cpum.s.aGuestCpuIdPatmStd[0].eax;
1878
1879 uint32_t cStdHstMax;
1880 uint32_t dummy;
1881 ASMCpuIdExSlow(0, 0, 0, 0, &cStdHstMax, &dummy, &dummy, &dummy);
1882
1883 unsigned cStdLstMax = RT_MAX(RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmStd), cStdHstMax);
1884
1885 pHlp->pfnPrintf(pHlp,
1886 " RAW Standard CPUIDs\n"
1887 " Function eax ebx ecx edx\n");
1888 for (unsigned i = 0; i <= cStdLstMax ; i++)
1889 {
1890 if (i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmStd))
1891 {
1892 Guest = pVM->cpum.s.aGuestCpuIdPatmStd[i];
1893 ASMCpuIdExSlow(i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1894
1895 pHlp->pfnPrintf(pHlp,
1896 "Gst: %08x %08x %08x %08x %08x%s\n"
1897 "Hst: %08x %08x %08x %08x\n",
1898 i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
1899 i <= cStdMax ? "" : "*",
1900 Host.eax, Host.ebx, Host.ecx, Host.edx);
1901 }
1902 else
1903 {
1904 ASMCpuIdExSlow(i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1905
1906 pHlp->pfnPrintf(pHlp,
1907 "Hst: %08x %08x %08x %08x %08x\n",
1908 i, Host.eax, Host.ebx, Host.ecx, Host.edx);
1909 }
1910 }
1911
1912 /*
1913 * If verbose, decode it.
1914 */
1915 if (iVerbosity)
1916 {
1917 Guest = pVM->cpum.s.aGuestCpuIdPatmStd[0];
1918 pHlp->pfnPrintf(pHlp,
1919 "Name: %.04s%.04s%.04s\n"
1920 "Supports: 0-%x\n",
1921 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
1922 }
1923
1924 /*
1925 * Get Features.
1926 */
1927 bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdPatmStd[0].ebx,
1928 pVM->cpum.s.aGuestCpuIdPatmStd[0].ecx,
1929 pVM->cpum.s.aGuestCpuIdPatmStd[0].edx);
1930 if (cStdMax >= 1 && iVerbosity)
1931 {
1932 static const char * const s_apszTypes[4] = { "primary", "overdrive", "MP", "reserved" };
1933
1934 Guest = pVM->cpum.s.aGuestCpuIdPatmStd[1];
1935 uint32_t uEAX = Guest.eax;
1936
1937 pHlp->pfnPrintf(pHlp,
1938 "Family: %d \tExtended: %d \tEffective: %d\n"
1939 "Model: %d \tExtended: %d \tEffective: %d\n"
1940 "Stepping: %d\n"
1941 "Type: %d (%s)\n"
1942 "APIC ID: %#04x\n"
1943 "Logical CPUs: %d\n"
1944 "CLFLUSH Size: %d\n"
1945 "Brand ID: %#04x\n",
1946 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
1947 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
1948 ASMGetCpuStepping(uEAX),
1949 (uEAX >> 12) & 3, s_apszTypes[(uEAX >> 12) & 3],
1950 (Guest.ebx >> 24) & 0xff,
1951 (Guest.ebx >> 16) & 0xff,
1952 (Guest.ebx >> 8) & 0xff,
1953 (Guest.ebx >> 0) & 0xff);
1954 if (iVerbosity == 1)
1955 {
1956 uint32_t uEDX = Guest.edx;
1957 pHlp->pfnPrintf(pHlp, "Features EDX: ");
1958 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
1959 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
1960 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
1961 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
1962 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
1963 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
1964 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
1965 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
1966 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
1967 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
1968 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
1969 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SEP");
1970 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
1971 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
1972 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
1973 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
1974 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
1975 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
1976 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " PSN");
1977 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " CLFSH");
1978 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " 20");
1979 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " DS");
1980 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ACPI");
1981 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
1982 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
1983 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " SSE");
1984 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " SSE2");
1985 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " SS");
1986 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " HTT");
1987 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " TM");
1988 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " 30");
1989 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " PBE");
1990 pHlp->pfnPrintf(pHlp, "\n");
1991
1992 uint32_t uECX = Guest.ecx;
1993 pHlp->pfnPrintf(pHlp, "Features ECX: ");
1994 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " SSE3");
1995 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " PCLMUL");
1996 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DTES64");
1997 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " MONITOR");
1998 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " DS-CPL");
1999 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " VMX");
2000 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " SMX");
2001 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " EST");
2002 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " TM2");
2003 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " SSSE3");
2004 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " CNXT-ID");
2005 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " 11");
2006 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " FMA");
2007 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " CX16");
2008 if (uECX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " TPRUPDATE");
2009 if (uECX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " PDCM");
2010 if (uECX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " 16");
2011 if (uECX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PCID");
2012 if (uECX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " DCA");
2013 if (uECX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " SSE4.1");
2014 if (uECX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " SSE4.2");
2015 if (uECX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " X2APIC");
2016 if (uECX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " MOVBE");
2017 if (uECX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " POPCNT");
2018 if (uECX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " TSCDEADL");
2019 if (uECX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " AES");
2020 if (uECX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " XSAVE");
2021 if (uECX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " OSXSAVE");
2022 if (uECX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " AVX");
2023 if (uECX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " F16C");
2024 if (uECX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " RDRAND");
2025 if (uECX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " HVP");
2026 pHlp->pfnPrintf(pHlp, "\n");
2027 }
2028 else
2029 {
2030 ASMCpuIdExSlow(1, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2031
2032 X86CPUIDFEATEDX EdxHost = *(PX86CPUIDFEATEDX)&Host.edx;
2033 X86CPUIDFEATECX EcxHost = *(PX86CPUIDFEATECX)&Host.ecx;
2034 X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.edx;
2035 X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.ecx;
2036
2037 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
2038 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", EdxGuest.u1FPU, EdxHost.u1FPU);
2039 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", EdxGuest.u1VME, EdxHost.u1VME);
2040 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", EdxGuest.u1DE, EdxHost.u1DE);
2041 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", EdxGuest.u1PSE, EdxHost.u1PSE);
2042 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", EdxGuest.u1TSC, EdxHost.u1TSC);
2043 pHlp->pfnPrintf(pHlp, "MSR - Model Specific Registers = %d (%d)\n", EdxGuest.u1MSR, EdxHost.u1MSR);
2044 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", EdxGuest.u1PAE, EdxHost.u1PAE);
2045 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", EdxGuest.u1MCE, EdxHost.u1MCE);
2046 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", EdxGuest.u1CX8, EdxHost.u1CX8);
2047 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", EdxGuest.u1APIC, EdxHost.u1APIC);
2048 pHlp->pfnPrintf(pHlp, "10 - Reserved = %d (%d)\n", EdxGuest.u1Reserved1, EdxHost.u1Reserved1);
2049 pHlp->pfnPrintf(pHlp, "SEP - SYSENTER and SYSEXIT = %d (%d)\n", EdxGuest.u1SEP, EdxHost.u1SEP);
2050 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", EdxGuest.u1MTRR, EdxHost.u1MTRR);
2051 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", EdxGuest.u1PGE, EdxHost.u1PGE);
2052 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", EdxGuest.u1MCA, EdxHost.u1MCA);
2053 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", EdxGuest.u1CMOV, EdxHost.u1CMOV);
2054 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", EdxGuest.u1PAT, EdxHost.u1PAT);
2055 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extention = %d (%d)\n", EdxGuest.u1PSE36, EdxHost.u1PSE36);
2056 pHlp->pfnPrintf(pHlp, "PSN - Processor Serial Number = %d (%d)\n", EdxGuest.u1PSN, EdxHost.u1PSN);
2057 pHlp->pfnPrintf(pHlp, "CLFSH - CLFLUSH Instruction. = %d (%d)\n", EdxGuest.u1CLFSH, EdxHost.u1CLFSH);
2058 pHlp->pfnPrintf(pHlp, "20 - Reserved = %d (%d)\n", EdxGuest.u1Reserved2, EdxHost.u1Reserved2);
2059 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", EdxGuest.u1DS, EdxHost.u1DS);
2060 pHlp->pfnPrintf(pHlp, "ACPI - Thermal Mon. & Soft. Clock Ctrl.= %d (%d)\n", EdxGuest.u1ACPI, EdxHost.u1ACPI);
2061 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", EdxGuest.u1MMX, EdxHost.u1MMX);
2062 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", EdxGuest.u1FXSR, EdxHost.u1FXSR);
2063 pHlp->pfnPrintf(pHlp, "SSE - SSE Support = %d (%d)\n", EdxGuest.u1SSE, EdxHost.u1SSE);
2064 pHlp->pfnPrintf(pHlp, "SSE2 - SSE2 Support = %d (%d)\n", EdxGuest.u1SSE2, EdxHost.u1SSE2);
2065 pHlp->pfnPrintf(pHlp, "SS - Self Snoop = %d (%d)\n", EdxGuest.u1SS, EdxHost.u1SS);
2066 pHlp->pfnPrintf(pHlp, "HTT - Hyper-Threading Technology = %d (%d)\n", EdxGuest.u1HTT, EdxHost.u1HTT);
2067 pHlp->pfnPrintf(pHlp, "TM - Thermal Monitor = %d (%d)\n", EdxGuest.u1TM, EdxHost.u1TM);
2068 pHlp->pfnPrintf(pHlp, "30 - Reserved = %d (%d)\n", EdxGuest.u1Reserved3, EdxHost.u1Reserved3);
2069 pHlp->pfnPrintf(pHlp, "PBE - Pending Break Enable = %d (%d)\n", EdxGuest.u1PBE, EdxHost.u1PBE);
2070
2071 pHlp->pfnPrintf(pHlp, "Supports SSE3 = %d (%d)\n", EcxGuest.u1SSE3, EcxHost.u1SSE3);
2072 pHlp->pfnPrintf(pHlp, "PCLMULQDQ = %d (%d)\n", EcxGuest.u1PCLMULQDQ, EcxHost.u1PCLMULQDQ);
2073 pHlp->pfnPrintf(pHlp, "DS Area 64-bit layout = %d (%d)\n", EcxGuest.u1DTE64, EcxHost.u1DTE64);
2074 pHlp->pfnPrintf(pHlp, "Supports MONITOR/MWAIT = %d (%d)\n", EcxGuest.u1Monitor, EcxHost.u1Monitor);
2075 pHlp->pfnPrintf(pHlp, "CPL-DS - CPL Qualified Debug Store = %d (%d)\n", EcxGuest.u1CPLDS, EcxHost.u1CPLDS);
2076 pHlp->pfnPrintf(pHlp, "VMX - Virtual Machine Technology = %d (%d)\n", EcxGuest.u1VMX, EcxHost.u1VMX);
2077 pHlp->pfnPrintf(pHlp, "SMX - Safer Mode Extensions = %d (%d)\n", EcxGuest.u1SMX, EcxHost.u1SMX);
2078 pHlp->pfnPrintf(pHlp, "Enhanced SpeedStep Technology = %d (%d)\n", EcxGuest.u1EST, EcxHost.u1EST);
2079 pHlp->pfnPrintf(pHlp, "Terminal Monitor 2 = %d (%d)\n", EcxGuest.u1TM2, EcxHost.u1TM2);
2080 pHlp->pfnPrintf(pHlp, "Supplemental SSE3 instructions = %d (%d)\n", EcxGuest.u1SSSE3, EcxHost.u1SSSE3);
2081 pHlp->pfnPrintf(pHlp, "L1 Context ID = %d (%d)\n", EcxGuest.u1CNTXID, EcxHost.u1CNTXID);
2082 pHlp->pfnPrintf(pHlp, "11 - Reserved = %d (%d)\n", EcxGuest.u1Reserved1, EcxHost.u1Reserved1);
2083 pHlp->pfnPrintf(pHlp, "FMA extensions using YMM state = %d (%d)\n", EcxGuest.u1FMA, EcxHost.u1FMA);
2084 pHlp->pfnPrintf(pHlp, "CMPXCHG16B instruction = %d (%d)\n", EcxGuest.u1CX16, EcxHost.u1CX16);
2085 pHlp->pfnPrintf(pHlp, "xTPR Update Control = %d (%d)\n", EcxGuest.u1TPRUpdate, EcxHost.u1TPRUpdate);
2086 pHlp->pfnPrintf(pHlp, "Perf/Debug Capability MSR = %d (%d)\n", EcxGuest.u1PDCM, EcxHost.u1PDCM);
2087 pHlp->pfnPrintf(pHlp, "16 - Reserved = %d (%d)\n", EcxGuest.u1Reserved2, EcxHost.u1Reserved2);
2088 pHlp->pfnPrintf(pHlp, "PCID - Process-context identifiers = %d (%d)\n", EcxGuest.u1PCID, EcxHost.u1PCID);
2089 pHlp->pfnPrintf(pHlp, "DCA - Direct Cache Access = %d (%d)\n", EcxGuest.u1DCA, EcxHost.u1DCA);
2090 pHlp->pfnPrintf(pHlp, "SSE4.1 instruction extensions = %d (%d)\n", EcxGuest.u1SSE4_1, EcxHost.u1SSE4_1);
2091 pHlp->pfnPrintf(pHlp, "SSE4.2 instruction extensions = %d (%d)\n", EcxGuest.u1SSE4_2, EcxHost.u1SSE4_2);
2092 pHlp->pfnPrintf(pHlp, "Supports the x2APIC extensions = %d (%d)\n", EcxGuest.u1x2APIC, EcxHost.u1x2APIC);
2093 pHlp->pfnPrintf(pHlp, "MOVBE instruction = %d (%d)\n", EcxGuest.u1MOVBE, EcxHost.u1MOVBE);
2094 pHlp->pfnPrintf(pHlp, "POPCNT instruction = %d (%d)\n", EcxGuest.u1POPCNT, EcxHost.u1POPCNT);
2095 pHlp->pfnPrintf(pHlp, "TSC-Deadline LAPIC timer mode = %d (%d)\n", EcxGuest.u1TSCDEADLINE,EcxHost.u1TSCDEADLINE);
2096 pHlp->pfnPrintf(pHlp, "AESNI instruction extensions = %d (%d)\n", EcxGuest.u1AES, EcxHost.u1AES);
2097 pHlp->pfnPrintf(pHlp, "XSAVE/XRSTOR extended state feature = %d (%d)\n", EcxGuest.u1XSAVE, EcxHost.u1XSAVE);
2098 pHlp->pfnPrintf(pHlp, "Supports OSXSAVE = %d (%d)\n", EcxGuest.u1OSXSAVE, EcxHost.u1OSXSAVE);
2099 pHlp->pfnPrintf(pHlp, "AVX instruction extensions = %d (%d)\n", EcxGuest.u1AVX, EcxHost.u1AVX);
2100 pHlp->pfnPrintf(pHlp, "16-bit floating point conversion instr = %d (%d)\n", EcxGuest.u1F16C, EcxHost.u1F16C);
2101 pHlp->pfnPrintf(pHlp, "RDRAND instruction = %d (%d)\n", EcxGuest.u1RDRAND, EcxHost.u1RDRAND);
2102 pHlp->pfnPrintf(pHlp, "Hypervisor Present (we're a guest) = %d (%d)\n", EcxGuest.u1HVP, EcxHost.u1HVP);
2103 }
2104 }
2105 if (cStdMax >= 2 && iVerbosity)
2106 {
2107 /** @todo */
2108 }
2109
2110 /*
2111 * Extended.
2112 * Implemented after AMD specs.
2113 */
2114 unsigned cExtMax = pVM->cpum.s.aGuestCpuIdPatmExt[0].eax & 0xffff;
2115
2116 pHlp->pfnPrintf(pHlp,
2117 "\n"
2118 " RAW Extended CPUIDs\n"
2119 " Function eax ebx ecx edx\n");
2120 bool fSupportsInvariantTsc = false;
2121 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmExt); i++)
2122 {
2123 Guest = pVM->cpum.s.aGuestCpuIdPatmExt[i];
2124 ASMCpuIdExSlow(0x80000000 | i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2125
2126 if ( i == 7
2127 && (Host.edx & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR))
2128 {
2129 fSupportsInvariantTsc = true;
2130 }
2131 pHlp->pfnPrintf(pHlp,
2132 "Gst: %08x %08x %08x %08x %08x%s\n"
2133 "Hst: %08x %08x %08x %08x\n",
2134 0x80000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
2135 i <= cExtMax ? "" : "*",
2136 Host.eax, Host.ebx, Host.ecx, Host.edx);
2137 }
2138
2139 /*
2140 * Understandable output
2141 */
2142 if (iVerbosity)
2143 {
2144 Guest = pVM->cpum.s.aGuestCpuIdPatmExt[0];
2145 pHlp->pfnPrintf(pHlp,
2146 "Ext Name: %.4s%.4s%.4s\n"
2147 "Ext Supports: 0x80000000-%#010x\n",
2148 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
2149 }
2150
2151 if (iVerbosity && cExtMax >= 1)
2152 {
2153 Guest = pVM->cpum.s.aGuestCpuIdPatmExt[1];
2154 uint32_t uEAX = Guest.eax;
2155 pHlp->pfnPrintf(pHlp,
2156 "Family: %d \tExtended: %d \tEffective: %d\n"
2157 "Model: %d \tExtended: %d \tEffective: %d\n"
2158 "Stepping: %d\n"
2159 "Brand ID: %#05x\n",
2160 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
2161 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
2162 ASMGetCpuStepping(uEAX),
2163 Guest.ebx & 0xfff);
2164
2165 if (iVerbosity == 1)
2166 {
2167 uint32_t uEDX = Guest.edx;
2168 pHlp->pfnPrintf(pHlp, "Features EDX: ");
2169 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
2170 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
2171 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
2172 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
2173 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
2174 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
2175 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
2176 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
2177 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
2178 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
2179 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
2180 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SCR");
2181 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
2182 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
2183 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
2184 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
2185 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
2186 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
2187 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " 18");
2188 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " 19");
2189 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " NX");
2190 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " 21");
2191 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ExtMMX");
2192 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
2193 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
2194 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " FastFXSR");
2195 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " Page1GB");
2196 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " RDTSCP");
2197 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " 28");
2198 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " LongMode");
2199 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " Ext3DNow");
2200 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " 3DNow");
2201 pHlp->pfnPrintf(pHlp, "\n");
2202
2203 uint32_t uECX = Guest.ecx;
2204 pHlp->pfnPrintf(pHlp, "Features ECX: ");
2205 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " LAHF/SAHF");
2206 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " CMPL");
2207 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " SVM");
2208 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " ExtAPIC");
2209 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " CR8L");
2210 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " ABM");
2211 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " SSE4A");
2212 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MISALNSSE");
2213 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " 3DNOWPRF");
2214 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " OSVW");
2215 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " IBS");
2216 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SSE5");
2217 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " SKINIT");
2218 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " WDT");
2219 for (unsigned iBit = 5; iBit < 32; iBit++)
2220 if (uECX & RT_BIT(iBit))
2221 pHlp->pfnPrintf(pHlp, " %d", iBit);
2222 pHlp->pfnPrintf(pHlp, "\n");
2223 }
2224 else
2225 {
2226 ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2227
2228 uint32_t uEdxGst = Guest.edx;
2229 uint32_t uEdxHst = Host.edx;
2230 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
2231 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
2232 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
2233 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
2234 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
2235 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
2236 pHlp->pfnPrintf(pHlp, "MSR - K86 Model Specific Registers = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
2237 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
2238 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
2239 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
2240 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
2241 pHlp->pfnPrintf(pHlp, "10 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
2242 pHlp->pfnPrintf(pHlp, "SEP - SYSCALL and SYSRET = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
2243 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
2244 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
2245 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
2246 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
2247 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
2248 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extention = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
2249 pHlp->pfnPrintf(pHlp, "18 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
2250 pHlp->pfnPrintf(pHlp, "19 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
2251 pHlp->pfnPrintf(pHlp, "NX - No-Execute Page Protection = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
2252 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
2253 pHlp->pfnPrintf(pHlp, "AXMMX - AMD Extensions to MMX Instr. = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
2254 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
2255 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
2256 pHlp->pfnPrintf(pHlp, "25 - AMD fast FXSAVE and FXRSTOR Instr.= %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
2257 pHlp->pfnPrintf(pHlp, "26 - 1 GB large page support = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
2258 pHlp->pfnPrintf(pHlp, "27 - RDTSCP instruction = %d (%d)\n", !!(uEdxGst & RT_BIT(27)), !!(uEdxHst & RT_BIT(27)));
2259 pHlp->pfnPrintf(pHlp, "28 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(28)), !!(uEdxHst & RT_BIT(28)));
2260 pHlp->pfnPrintf(pHlp, "29 - AMD Long Mode = %d (%d)\n", !!(uEdxGst & RT_BIT(29)), !!(uEdxHst & RT_BIT(29)));
2261 pHlp->pfnPrintf(pHlp, "30 - AMD Extensions to 3DNow! = %d (%d)\n", !!(uEdxGst & RT_BIT(30)), !!(uEdxHst & RT_BIT(30)));
2262 pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow! = %d (%d)\n", !!(uEdxGst & RT_BIT(31)), !!(uEdxHst & RT_BIT(31)));
2263
2264 uint32_t uEcxGst = Guest.ecx;
2265 uint32_t uEcxHst = Host.ecx;
2266 pHlp->pfnPrintf(pHlp, "LahfSahf - LAHF/SAHF in 64-bit mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 0)), !!(uEcxHst & RT_BIT( 0)));
2267 pHlp->pfnPrintf(pHlp, "CmpLegacy - Core MP legacy mode (depr) = %d (%d)\n", !!(uEcxGst & RT_BIT( 1)), !!(uEcxHst & RT_BIT( 1)));
2268 pHlp->pfnPrintf(pHlp, "SVM - AMD VM Extensions = %d (%d)\n", !!(uEcxGst & RT_BIT( 2)), !!(uEcxHst & RT_BIT( 2)));
2269 pHlp->pfnPrintf(pHlp, "APIC registers starting at 0x400 = %d (%d)\n", !!(uEcxGst & RT_BIT( 3)), !!(uEcxHst & RT_BIT( 3)));
2270 pHlp->pfnPrintf(pHlp, "AltMovCR8 - LOCK MOV CR0 means MOV CR8 = %d (%d)\n", !!(uEcxGst & RT_BIT( 4)), !!(uEcxHst & RT_BIT( 4)));
2271 pHlp->pfnPrintf(pHlp, "5 - Advanced bit manipulation = %d (%d)\n", !!(uEcxGst & RT_BIT( 5)), !!(uEcxHst & RT_BIT( 5)));
2272 pHlp->pfnPrintf(pHlp, "6 - SSE4A instruction support = %d (%d)\n", !!(uEcxGst & RT_BIT( 6)), !!(uEcxHst & RT_BIT( 6)));
2273 pHlp->pfnPrintf(pHlp, "7 - Misaligned SSE mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 7)), !!(uEcxHst & RT_BIT( 7)));
2274 pHlp->pfnPrintf(pHlp, "8 - PREFETCH and PREFETCHW instruction= %d (%d)\n", !!(uEcxGst & RT_BIT( 8)), !!(uEcxHst & RT_BIT( 8)));
2275 pHlp->pfnPrintf(pHlp, "9 - OS visible workaround = %d (%d)\n", !!(uEcxGst & RT_BIT( 9)), !!(uEcxHst & RT_BIT( 9)));
2276 pHlp->pfnPrintf(pHlp, "10 - Instruction based sampling = %d (%d)\n", !!(uEcxGst & RT_BIT(10)), !!(uEcxHst & RT_BIT(10)));
2277 pHlp->pfnPrintf(pHlp, "11 - SSE5 support = %d (%d)\n", !!(uEcxGst & RT_BIT(11)), !!(uEcxHst & RT_BIT(11)));
2278 pHlp->pfnPrintf(pHlp, "12 - SKINIT, STGI, and DEV support = %d (%d)\n", !!(uEcxGst & RT_BIT(12)), !!(uEcxHst & RT_BIT(12)));
2279 pHlp->pfnPrintf(pHlp, "13 - Watchdog timer support. = %d (%d)\n", !!(uEcxGst & RT_BIT(13)), !!(uEcxHst & RT_BIT(13)));
2280 pHlp->pfnPrintf(pHlp, "31:14 - Reserved = %#x (%#x)\n", uEcxGst >> 14, uEcxHst >> 14);
2281 }
2282 }
2283
2284 if (iVerbosity && cExtMax >= 2)
2285 {
2286 char szString[4*4*3+1] = {0};
2287 uint32_t *pu32 = (uint32_t *)szString;
2288 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].eax;
2289 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].ebx;
2290 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].ecx;
2291 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].edx;
2292 if (cExtMax >= 3)
2293 {
2294 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].eax;
2295 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].ebx;
2296 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].ecx;
2297 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].edx;
2298 }
2299 if (cExtMax >= 4)
2300 {
2301 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].eax;
2302 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].ebx;
2303 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].ecx;
2304 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].edx;
2305 }
2306 pHlp->pfnPrintf(pHlp, "Full Name: %s\n", szString);
2307 }
2308
2309 if (iVerbosity && cExtMax >= 5)
2310 {
2311 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[5].eax;
2312 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[5].ebx;
2313 uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[5].ecx;
2314 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[5].edx;
2315 char sz1[32];
2316 char sz2[32];
2317
2318 pHlp->pfnPrintf(pHlp,
2319 "TLB 2/4M Instr/Uni: %s %3d entries\n"
2320 "TLB 2/4M Data: %s %3d entries\n",
2321 getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
2322 getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
2323 pHlp->pfnPrintf(pHlp,
2324 "TLB 4K Instr/Uni: %s %3d entries\n"
2325 "TLB 4K Data: %s %3d entries\n",
2326 getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
2327 getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
2328 pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
2329 "L1 Instr Cache Lines Per Tag: %d\n"
2330 "L1 Instr Cache Associativity: %s\n"
2331 "L1 Instr Cache Size: %d KB\n",
2332 (uEDX >> 0) & 0xff,
2333 (uEDX >> 8) & 0xff,
2334 getCacheAss((uEDX >> 16) & 0xff, sz1),
2335 (uEDX >> 24) & 0xff);
2336 pHlp->pfnPrintf(pHlp,
2337 "L1 Data Cache Line Size: %d bytes\n"
2338 "L1 Data Cache Lines Per Tag: %d\n"
2339 "L1 Data Cache Associativity: %s\n"
2340 "L1 Data Cache Size: %d KB\n",
2341 (uECX >> 0) & 0xff,
2342 (uECX >> 8) & 0xff,
2343 getCacheAss((uECX >> 16) & 0xff, sz1),
2344 (uECX >> 24) & 0xff);
2345 }
2346
2347 if (iVerbosity && cExtMax >= 6)
2348 {
2349 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[6].eax;
2350 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[6].ebx;
2351 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[6].edx;
2352
2353 pHlp->pfnPrintf(pHlp,
2354 "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
2355 "L2 TLB 2/4M Data: %s %4d entries\n",
2356 getL2CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
2357 getL2CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
2358 pHlp->pfnPrintf(pHlp,
2359 "L2 TLB 4K Instr/Uni: %s %4d entries\n"
2360 "L2 TLB 4K Data: %s %4d entries\n",
2361 getL2CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
2362 getL2CacheAss((uEBX >> 28) & 0xf), (uEBX >> 16) & 0xfff);
2363 pHlp->pfnPrintf(pHlp,
2364 "L2 Cache Line Size: %d bytes\n"
2365 "L2 Cache Lines Per Tag: %d\n"
2366 "L2 Cache Associativity: %s\n"
2367 "L2 Cache Size: %d KB\n",
2368 (uEDX >> 0) & 0xff,
2369 (uEDX >> 8) & 0xf,
2370 getL2CacheAss((uEDX >> 12) & 0xf),
2371 (uEDX >> 16) & 0xffff);
2372 }
2373
2374 if (iVerbosity && cExtMax >= 7)
2375 {
2376 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[7].edx;
2377
2378 pHlp->pfnPrintf(pHlp, "Host Invariant-TSC support: %RTbool\n", fSupportsInvariantTsc);
2379 pHlp->pfnPrintf(pHlp, "APM Features: ");
2380 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " TS");
2381 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " FID");
2382 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " VID");
2383 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " TTP");
2384 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TM");
2385 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " STC");
2386 for (unsigned iBit = 6; iBit < 32; iBit++)
2387 {
2388 if (uEDX & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR)
2389 pHlp->pfnPrintf(pHlp, " TSCINVARIANT");
2390 else if (uEDX & RT_BIT(iBit))
2391 pHlp->pfnPrintf(pHlp, " %d", iBit);
2392 }
2393 pHlp->pfnPrintf(pHlp, "\n");
2394 }
2395
2396 if (iVerbosity && cExtMax >= 8)
2397 {
2398 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[8].eax;
2399 uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[8].ecx;
2400
2401 pHlp->pfnPrintf(pHlp,
2402 "Physical Address Width: %d bits\n"
2403 "Virtual Address Width: %d bits\n"
2404 "Guest Physical Address Width: %d bits\n",
2405 (uEAX >> 0) & 0xff,
2406 (uEAX >> 8) & 0xff,
2407 (uEAX >> 16) & 0xff);
2408 pHlp->pfnPrintf(pHlp,
2409 "Physical Core Count: %d\n",
2410 (uECX >> 0) & 0xff);
2411 }
2412
2413
2414 /*
2415 * Hypervisor leaves.
2416 *
2417 * Unlike most of the other leaves reported, the guest hypervisor leaves
2418 * aren't a subset of the host CPUID bits.
2419 */
2420 RT_ZERO(Host);
2421 if (cStdHstMax >= 1)
2422 ASMCpuIdExSlow(1, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2423 bool fHostHvp = RT_BOOL(Host.ecx & X86_CPUID_FEATURE_ECX_HVP);
2424 bool fGuestHvp = false;
2425 if (cStdMax >= 1)
2426 {
2427 Guest = pVM->cpum.s.aGuestCpuIdPatmStd[1];
2428 fGuestHvp = RT_BOOL(Guest.ecx & X86_CPUID_FEATURE_ECX_HVP);
2429 }
2430
2431 if ( fHostHvp
2432 || fGuestHvp)
2433 {
2434 uint32_t const uHyperLeaf = 0x40000000;
2435 pHlp->pfnPrintf(pHlp,
2436 "\n"
2437 " Hypervisor CPUIDs\n"
2438 " Function eax ebx ecx edx\n");
2439
2440 PCCPUMCPUIDLEAF pHyperLeafGst = NULL;
2441 if (fGuestHvp)
2442 {
2443 pHyperLeafGst = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
2444 uHyperLeaf, 0 /* uSubLeaf */);
2445 }
2446
2447 RT_ZERO(Host);
2448 if (fHostHvp)
2449 ASMCpuIdExSlow(uHyperLeaf, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2450
2451 CPUMCPUIDLEAF GuestLeaf;
2452 uint32_t const cHyperGstMax = pHyperLeafGst ? pHyperLeafGst->uEax : 0;
2453 uint32_t const cHyperHstMax = Host.eax;
2454 uint32_t const cHyperMax = RT_MAX(cHyperHstMax, cHyperGstMax);
2455 for (uint32_t i = uHyperLeaf; i <= cHyperMax; i++)
2456 {
2457 RT_ZERO(Host);
2458 RT_ZERO(GuestLeaf);
2459 if (i <= cHyperHstMax)
2460 ASMCpuIdExSlow(i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2461 CPUMR3CpuIdGetLeaf(pVM, &GuestLeaf, i, 0 /* uSubLeaf */);
2462 if (!fHostHvp)
2463 {
2464 pHlp->pfnPrintf(pHlp,
2465 "Gst: %08x %08x %08x %08x %08x\n",
2466 i, GuestLeaf.uEax, GuestLeaf.uEbx, GuestLeaf.uEcx, GuestLeaf.uEdx);
2467 }
2468 else
2469 {
2470 pHlp->pfnPrintf(pHlp,
2471 "Gst: %08x %08x %08x %08x %08x%s\n"
2472 "Hst: %08x %08x %08x %08x%s\n",
2473 i, GuestLeaf.uEax, GuestLeaf.uEbx, GuestLeaf.uEcx, GuestLeaf.uEdx,
2474 i <= cHyperGstMax ? "" : "*",
2475 Host.eax, Host.ebx, Host.ecx, Host.edx, i <= cHyperHstMax ? "" : "*");
2476 }
2477 }
2478 }
2479
2480 /*
2481 * Centaur.
2482 */
2483 unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdPatmCentaur[0].eax & 0xffff;
2484
2485 pHlp->pfnPrintf(pHlp,
2486 "\n"
2487 " RAW Centaur CPUIDs\n"
2488 " Function eax ebx ecx edx\n");
2489 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmCentaur); i++)
2490 {
2491 Guest = pVM->cpum.s.aGuestCpuIdPatmCentaur[i];
2492 ASMCpuIdExSlow(0xc0000000 | i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2493
2494 pHlp->pfnPrintf(pHlp,
2495 "Gst: %08x %08x %08x %08x %08x%s\n"
2496 "Hst: %08x %08x %08x %08x\n",
2497 0xc0000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
2498 i <= cCentaurMax ? "" : "*",
2499 Host.eax, Host.ebx, Host.ecx, Host.edx);
2500 }
2501
2502 /*
2503 * Understandable output
2504 */
2505 if (iVerbosity)
2506 {
2507 Guest = pVM->cpum.s.aGuestCpuIdPatmCentaur[0];
2508 pHlp->pfnPrintf(pHlp,
2509 "Centaur Supports: 0xc0000000-%#010x\n",
2510 Guest.eax);
2511 }
2512
2513 if (iVerbosity && cCentaurMax >= 1)
2514 {
2515 ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2516 uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdPatmCentaur[1].edx;
2517 uint32_t uEdxHst = Host.edx;
2518
2519 if (iVerbosity == 1)
2520 {
2521 pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
2522 if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
2523 if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
2524 if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
2525 if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
2526 if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
2527 if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
2528 if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
2529 if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
2530 /* possibly indicating MM/HE and MM/HE-E on older chips... */
2531 if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
2532 if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
2533 if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
2534 if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
2535 if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
2536 if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
2537 for (unsigned iBit = 14; iBit < 32; iBit++)
2538 if (uEdxGst & RT_BIT(iBit))
2539 pHlp->pfnPrintf(pHlp, " %d", iBit);
2540 pHlp->pfnPrintf(pHlp, "\n");
2541 }
2542 else
2543 {
2544 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
2545 pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
2546 pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
2547 pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
2548 pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
2549 pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
2550 pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
2551 pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
2552 pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
2553 /* possibly indicating MM/HE and MM/HE-E on older chips... */
2554 pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
2555 pHlp->pfnPrintf(pHlp, "ACE2-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
2556 pHlp->pfnPrintf(pHlp, "PHE - Padlock Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
2557 pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
2558 pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
2559 pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
2560 pHlp->pfnPrintf(pHlp, "14 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
2561 pHlp->pfnPrintf(pHlp, "15 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
2562 pHlp->pfnPrintf(pHlp, "Parallax = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
2563 pHlp->pfnPrintf(pHlp, "Parallax enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
2564 pHlp->pfnPrintf(pHlp, "Overstress = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
2565 pHlp->pfnPrintf(pHlp, "Overstress enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
2566 pHlp->pfnPrintf(pHlp, "TM3 - Temperature Monitoring 3 = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
2567 pHlp->pfnPrintf(pHlp, "TM3-E - TM3 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
2568 pHlp->pfnPrintf(pHlp, "RNG2 - Random Number Generator 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
2569 pHlp->pfnPrintf(pHlp, "RNG2-E - RNG2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
2570 pHlp->pfnPrintf(pHlp, "24 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
2571 pHlp->pfnPrintf(pHlp, "PHE2 - Padlock Hash Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
2572 pHlp->pfnPrintf(pHlp, "PHE2-E - PHE2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
2573 for (unsigned iBit = 27; iBit < 32; iBit++)
2574 if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
2575 pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", iBit, !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
2576 pHlp->pfnPrintf(pHlp, "\n");
2577 }
2578 }
2579}
2580
2581
2582/**
2583 * Structure used when disassembling and instructions in DBGF.
2584 * This is used so the reader function can get the stuff it needs.
2585 */
2586typedef struct CPUMDISASSTATE
2587{
2588 /** Pointer to the CPU structure. */
2589 PDISCPUSTATE pCpu;
2590 /** Pointer to the VM. */
2591 PVM pVM;
2592 /** Pointer to the VMCPU. */
2593 PVMCPU pVCpu;
2594 /** Pointer to the first byte in the segment. */
2595 RTGCUINTPTR GCPtrSegBase;
2596 /** Pointer to the byte after the end of the segment. (might have wrapped!) */
2597 RTGCUINTPTR GCPtrSegEnd;
2598 /** The size of the segment minus 1. */
2599 RTGCUINTPTR cbSegLimit;
2600 /** Pointer to the current page - R3 Ptr. */
2601 void const *pvPageR3;
2602 /** Pointer to the current page - GC Ptr. */
2603 RTGCPTR pvPageGC;
2604 /** The lock information that PGMPhysReleasePageMappingLock needs. */
2605 PGMPAGEMAPLOCK PageMapLock;
2606 /** Whether the PageMapLock is valid or not. */
2607 bool fLocked;
2608 /** 64 bits mode or not. */
2609 bool f64Bits;
2610} CPUMDISASSTATE, *PCPUMDISASSTATE;
2611
2612
2613/**
2614 * @callback_method_impl{FNDISREADBYTES}
2615 */
2616static DECLCALLBACK(int) cpumR3DisasInstrRead(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
2617{
2618 PCPUMDISASSTATE pState = (PCPUMDISASSTATE)pDis->pvUser;
2619 for (;;)
2620 {
2621 RTGCUINTPTR GCPtr = pDis->uInstrAddr + offInstr + pState->GCPtrSegBase;
2622
2623 /*
2624 * Need to update the page translation?
2625 */
2626 if ( !pState->pvPageR3
2627 || (GCPtr >> PAGE_SHIFT) != (pState->pvPageGC >> PAGE_SHIFT))
2628 {
2629 int rc = VINF_SUCCESS;
2630
2631 /* translate the address */
2632 pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
2633 if ( !HMIsEnabled(pState->pVM)
2634 && MMHyperIsInsideArea(pState->pVM, pState->pvPageGC))
2635 {
2636 pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->pvPageGC);
2637 if (!pState->pvPageR3)
2638 rc = VERR_INVALID_POINTER;
2639 }
2640 else
2641 {
2642 /* Release mapping lock previously acquired. */
2643 if (pState->fLocked)
2644 PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
2645 rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVCpu, pState->pvPageGC, &pState->pvPageR3, &pState->PageMapLock);
2646 pState->fLocked = RT_SUCCESS_NP(rc);
2647 }
2648 if (RT_FAILURE(rc))
2649 {
2650 pState->pvPageR3 = NULL;
2651 return rc;
2652 }
2653 }
2654
2655 /*
2656 * Check the segment limit.
2657 */
2658 if (!pState->f64Bits && pDis->uInstrAddr + offInstr > pState->cbSegLimit)
2659 return VERR_OUT_OF_SELECTOR_BOUNDS;
2660
2661 /*
2662 * Calc how much we can read.
2663 */
2664 uint32_t cb = PAGE_SIZE - (GCPtr & PAGE_OFFSET_MASK);
2665 if (!pState->f64Bits)
2666 {
2667 RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
2668 if (cb > cbSeg && cbSeg)
2669 cb = cbSeg;
2670 }
2671 if (cb > cbMaxRead)
2672 cb = cbMaxRead;
2673
2674 /*
2675 * Read and advance or exit.
2676 */
2677 memcpy(&pDis->abInstr[offInstr], (uint8_t *)pState->pvPageR3 + (GCPtr & PAGE_OFFSET_MASK), cb);
2678 offInstr += (uint8_t)cb;
2679 if (cb >= cbMinRead)
2680 {
2681 pDis->cbCachedInstr = offInstr;
2682 return VINF_SUCCESS;
2683 }
2684 cbMinRead -= (uint8_t)cb;
2685 cbMaxRead -= (uint8_t)cb;
2686 }
2687}
2688
2689
2690/**
2691 * Disassemble an instruction and return the information in the provided structure.
2692 *
2693 * @returns VBox status code.
2694 * @param pVM Pointer to the VM.
2695 * @param pVCpu Pointer to the VMCPU.
2696 * @param pCtx Pointer to the guest CPU context.
2697 * @param GCPtrPC Program counter (relative to CS) to disassemble from.
2698 * @param pCpu Disassembly state.
2699 * @param pszPrefix String prefix for logging (debug only).
2700 *
2701 */
2702VMMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPTR GCPtrPC, PDISCPUSTATE pCpu, const char *pszPrefix)
2703{
2704 CPUMDISASSTATE State;
2705 int rc;
2706
2707 const PGMMODE enmMode = PGMGetGuestMode(pVCpu);
2708 State.pCpu = pCpu;
2709 State.pvPageGC = 0;
2710 State.pvPageR3 = NULL;
2711 State.pVM = pVM;
2712 State.pVCpu = pVCpu;
2713 State.fLocked = false;
2714 State.f64Bits = false;
2715
2716 /*
2717 * Get selector information.
2718 */
2719 DISCPUMODE enmDisCpuMode;
2720 if ( (pCtx->cr0 & X86_CR0_PE)
2721 && pCtx->eflags.Bits.u1VM == 0)
2722 {
2723 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
2724 {
2725# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2726 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtx->cs);
2727# endif
2728 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
2729 return VERR_CPUM_HIDDEN_CS_LOAD_ERROR;
2730 }
2731 State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->cs.Attr.n.u1Long;
2732 State.GCPtrSegBase = pCtx->cs.u64Base;
2733 State.GCPtrSegEnd = pCtx->cs.u32Limit + 1 + (RTGCUINTPTR)pCtx->cs.u64Base;
2734 State.cbSegLimit = pCtx->cs.u32Limit;
2735 enmDisCpuMode = (State.f64Bits)
2736 ? DISCPUMODE_64BIT
2737 : pCtx->cs.Attr.n.u1DefBig
2738 ? DISCPUMODE_32BIT
2739 : DISCPUMODE_16BIT;
2740 }
2741 else
2742 {
2743 /* real or V86 mode */
2744 enmDisCpuMode = DISCPUMODE_16BIT;
2745 State.GCPtrSegBase = pCtx->cs.Sel * 16;
2746 State.GCPtrSegEnd = 0xFFFFFFFF;
2747 State.cbSegLimit = 0xFFFFFFFF;
2748 }
2749
2750 /*
2751 * Disassemble the instruction.
2752 */
2753 uint32_t cbInstr;
2754#ifndef LOG_ENABLED
2755 rc = DISInstrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State, pCpu, &cbInstr);
2756 if (RT_SUCCESS(rc))
2757 {
2758#else
2759 char szOutput[160];
2760 rc = DISInstrToStrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State,
2761 pCpu, &cbInstr, szOutput, sizeof(szOutput));
2762 if (RT_SUCCESS(rc))
2763 {
2764 /* log it */
2765 if (pszPrefix)
2766 Log(("%s-CPU%d: %s", pszPrefix, pVCpu->idCpu, szOutput));
2767 else
2768 Log(("%s", szOutput));
2769#endif
2770 rc = VINF_SUCCESS;
2771 }
2772 else
2773 Log(("CPUMR3DisasmInstrCPU: DISInstr failed for %04X:%RGv rc=%Rrc\n", pCtx->cs.Sel, GCPtrPC, rc));
2774
2775 /* Release mapping lock acquired in cpumR3DisasInstrRead. */
2776 if (State.fLocked)
2777 PGMPhysReleasePageMappingLock(pVM, &State.PageMapLock);
2778
2779 return rc;
2780}
2781
2782
2783
2784/**
2785 * API for controlling a few of the CPU features found in CR4.
2786 *
2787 * Currently only X86_CR4_TSD is accepted as input.
2788 *
2789 * @returns VBox status code.
2790 *
2791 * @param pVM Pointer to the VM.
2792 * @param fOr The CR4 OR mask.
2793 * @param fAnd The CR4 AND mask.
2794 */
2795VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd)
2796{
2797 AssertMsgReturn(!(fOr & ~(X86_CR4_TSD)), ("%#x\n", fOr), VERR_INVALID_PARAMETER);
2798 AssertMsgReturn((fAnd & ~(X86_CR4_TSD)) == ~(X86_CR4_TSD), ("%#x\n", fAnd), VERR_INVALID_PARAMETER);
2799
2800 pVM->cpum.s.CR4.OrMask &= fAnd;
2801 pVM->cpum.s.CR4.OrMask |= fOr;
2802
2803 return VINF_SUCCESS;
2804}
2805
2806
2807/**
2808 * Enters REM, gets and resets the changed flags (CPUM_CHANGED_*).
2809 *
2810 * Only REM should ever call this function!
2811 *
2812 * @returns The changed flags.
2813 * @param pVCpu Pointer to the VMCPU.
2814 * @param puCpl Where to return the current privilege level (CPL).
2815 */
2816VMMR3DECL(uint32_t) CPUMR3RemEnter(PVMCPU pVCpu, uint32_t *puCpl)
2817{
2818 Assert(!pVCpu->cpum.s.fRawEntered);
2819 Assert(!pVCpu->cpum.s.fRemEntered);
2820
2821 /*
2822 * Get the CPL first.
2823 */
2824 *puCpl = CPUMGetGuestCPL(pVCpu);
2825
2826 /*
2827 * Get and reset the flags.
2828 */
2829 uint32_t fFlags = pVCpu->cpum.s.fChanged;
2830 pVCpu->cpum.s.fChanged = 0;
2831
2832 /** @todo change the switcher to use the fChanged flags. */
2833 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
2834 {
2835 fFlags |= CPUM_CHANGED_FPU_REM;
2836 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
2837 }
2838
2839 pVCpu->cpum.s.fRemEntered = true;
2840 return fFlags;
2841}
2842
2843
2844/**
2845 * Leaves REM.
2846 *
2847 * @param pVCpu Pointer to the VMCPU.
2848 * @param fNoOutOfSyncSels This is @c false if there are out of sync
2849 * registers.
2850 */
2851VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels)
2852{
2853 Assert(!pVCpu->cpum.s.fRawEntered);
2854 Assert(pVCpu->cpum.s.fRemEntered);
2855
2856 pVCpu->cpum.s.fRemEntered = false;
2857}
2858
2859
2860/**
2861 * Called when the ring-3 init phase completes.
2862 *
2863 * @returns VBox status code.
2864 * @param pVM Pointer to the VM.
2865 */
2866VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM)
2867{
2868 /*
2869 * Figure out if the guest uses 32-bit or 64-bit FPU state at runtime for 64-bit capable VMs.
2870 * Only applicable/used on 64-bit hosts, refer CPUMR0A.asm. See @bugref{7138}.
2871 */
2872 bool const fSupportsLongMode = VMR3IsLongModeAllowed(pVM);
2873 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2874 {
2875 PVMCPU pVCpu = &pVM->aCpus[i];
2876
2877 /* Cache the APIC base (from the APIC device) once it has been initialized. */
2878 PDMApicGetBase(pVCpu, &pVCpu->cpum.s.Guest.msrApicBase);
2879 Log(("CPUMR3InitCompleted pVM=%p APIC base[%u]=%RX64\n", pVM, (unsigned)i, pVCpu->cpum.s.Guest.msrApicBase));
2880
2881 /* While loading a saved-state we fix it up in, cpumR3LoadDone(). */
2882 if (fSupportsLongMode)
2883 pVCpu->cpum.s.fUseFlags |= CPUM_USE_SUPPORTS_LONGMODE;
2884 }
2885 return VINF_SUCCESS;
2886}
2887
2888
2889/**
2890 * Called when the ring-0 init phases comleted.
2891 *
2892 * @param pVM Pointer to the VM.
2893 */
2894VMMR3DECL(void) CPUMR3LogCpuIds(PVM pVM)
2895{
2896 /*
2897 * Log the cpuid.
2898 */
2899 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2900 RTCPUSET OnlineSet;
2901 LogRel(("Logical host processors: %u present, %u max, %u online, online mask: %016RX64\n",
2902 (unsigned)RTMpGetPresentCount(), (unsigned)RTMpGetCount(), (unsigned)RTMpGetOnlineCount(),
2903 RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
2904 RTCPUID cCores = RTMpGetCoreCount();
2905 if (cCores)
2906 LogRel(("Physical host cores: %u\n", (unsigned)cCores));
2907 LogRel(("************************* CPUID dump ************************\n"));
2908 DBGFR3Info(pVM->pUVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
2909 LogRel(("\n"));
2910 DBGFR3_INFO_LOG(pVM, "cpuid", "verbose"); /* macro */
2911 RTLogRelSetBuffering(fOldBuffered);
2912 LogRel(("******************** End of CPUID dump **********************\n"));
2913}
2914
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette