VirtualBox

source: vbox/trunk/src/VBox/VMM/CPUM.cpp@ 10780

Last change on this file since 10780 was 10687, checked in by vboxsync, 16 years ago

Save the FPU control word and MXCSR on entry and restore them afterwards (VT-x & AMD-V).
This is a security measure so the guest can't cause FPU/SSE exceptions, as we no longer
restore the entire host FPU state.
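
Below is a rough sketch of the idea behind that change, not the actual VirtualBox code: the
function names, globals and GCC-style inline assembly are assumptions for illustration. The
point is simply to capture the host's x87 control word and MXCSR before running guest code
and to write them back afterwards, so guest changes cannot leave unmasked FPU/SSE exceptions
armed on the host.

/* Sketch only: preserve the host FPU/SSE control state across a guest run.
 * Assumes an x86 host with SSE and a GCC-compatible compiler. */
#include <stdint.h>

static uint16_t g_uHostFcw;    /* x87 FPU control word */
static uint32_t g_uHostMxcsr;  /* SSE control/status register */

static void hostFpuCtrlSave(void)
{
    __asm__ __volatile__("fnstcw %0"  : "=m" (g_uHostFcw));
    __asm__ __volatile__("stmxcsr %0" : "=m" (g_uHostMxcsr));
}

static void hostFpuCtrlRestore(void)
{
    __asm__ __volatile__("fldcw %0"   : : "m" (g_uHostFcw));
    __asm__ __volatile__("ldmxcsr %0" : : "m" (g_uHostMxcsr));
}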

1/* $Id: CPUM.cpp 10687 2008-07-16 09:22:28Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_cpum
23 * The CPU Monitor / Manager keeps track of all the CPU registers. It is
24 * also responsible for lazy FPU handling and some of the context loading
25 * in raw mode.
26 *
27 * There are three CPU contexts; the most important one is the guest context (GC).
28 * When running in raw-mode (RC) there is a special hyper context for the VMM
29 * that floats around inside the guest address space. When running in raw-mode
30 * or when using 64-bit guests on a 32-bit host, CPUM also maintains a host
31 * context for saving and restoring registers across world switches. The latter
32 * is done in cooperation with the world switcher (@see pg_vmm).
33 */
34
35/*******************************************************************************
36* Header Files *
37*******************************************************************************/
38#define LOG_GROUP LOG_GROUP_CPUM
39#include <VBox/cpum.h>
40#include <VBox/cpumdis.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/mm.h>
44#include <VBox/selm.h>
45#include <VBox/dbgf.h>
46#include <VBox/patm.h>
47#include <VBox/ssm.h>
48#include "CPUMInternal.h"
49#include <VBox/vm.h>
50
51#include <VBox/param.h>
52#include <VBox/dis.h>
53#include <VBox/err.h>
54#include <VBox/log.h>
55#include <iprt/assert.h>
56#include <iprt/asm.h>
57#include <iprt/string.h>
58#include <iprt/system.h>
59
60
61/*******************************************************************************
62* Defined Constants And Macros *
63*******************************************************************************/
64/** The saved state version. */
65#define CPUM_SAVED_STATE_VERSION 8
66
67
68/*******************************************************************************
69* Structures and Typedefs *
70*******************************************************************************/
71
72/**
73 * What kind of cpu info dump to perform.
74 */
75typedef enum CPUMDUMPTYPE
76{
77 CPUMDUMPTYPE_TERSE,
78 CPUMDUMPTYPE_DEFAULT,
79 CPUMDUMPTYPE_VERBOSE
80
81} CPUMDUMPTYPE;
82/** Pointer to a cpu info dump type. */
83typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;
84
85/*******************************************************************************
86* Internal Functions *
87*******************************************************************************/
88static int cpumR3CpuIdInit(PVM pVM);
89static DECLCALLBACK(int) cpumR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) cpumR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
92static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
93static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
94static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
95static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
96static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
97
98
99/**
100 * Initializes the CPUM.
101 *
102 * @returns VBox status code.
103 * @param pVM The VM to operate on.
104 */
105CPUMR3DECL(int) CPUMR3Init(PVM pVM)
106{
107 LogFlow(("CPUMR3Init\n"));
108
109 /*
110 * Assert alignment and sizes.
111 */
112 AssertRelease(!(RT_OFFSETOF(VM, cpum.s) & 31));
113 AssertRelease(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
114
115 /*
116 * Setup any fixed pointers and offsets.
117 */
118 pVM->cpum.s.offVM = RT_OFFSETOF(VM, cpum);
119 pVM->cpum.s.pCPUMHC = &pVM->cpum.s;
120 pVM->cpum.s.pHyperCoreR3 = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
121 pVM->cpum.s.pHyperCoreR0 = VM_R0_ADDR(pVM, CPUMCTX2CORE(&pVM->cpum.s.Hyper));
122
123 /* Hidden selector registers are invalid by default. */
124 pVM->cpum.s.fValidHiddenSelRegs = false;
125
126 /*
127 * Check that the CPU supports the minimum features we require.
128 */
129 /** @todo check the contract! */
130 if (!ASMHasCpuId())
131 {
132 Log(("The CPU doesn't support CPUID!\n"));
133 return VERR_UNSUPPORTED_CPU;
134 }
135 ASMCpuId_ECX_EDX(1, &pVM->cpum.s.CPUFeatures.ecx, &pVM->cpum.s.CPUFeatures.edx);
136 ASMCpuId_ECX_EDX(0x80000001, &pVM->cpum.s.CPUFeaturesExt.ecx, &pVM->cpum.s.CPUFeaturesExt.edx);
137
138 /* Setup the CR4 AND and OR masks used in the switcher */
139 /* Depends on the presence of FXSAVE(SSE) support on the host CPU */
140 if (!pVM->cpum.s.CPUFeatures.edx.u1FXSR)
141 {
142 Log(("The CPU doesn't support FXSAVE/FXRSTOR!\n"));
143 /* No FXSAVE implies no SSE */
144 pVM->cpum.s.CR4.AndMask = X86_CR4_PVI | X86_CR4_VME;
145 pVM->cpum.s.CR4.OrMask = 0;
146 }
147 else
148 {
149 pVM->cpum.s.CR4.AndMask = X86_CR4_OSXMMEEXCPT | X86_CR4_PVI | X86_CR4_VME;
150 pVM->cpum.s.CR4.OrMask = X86_CR4_OSFSXR;
151 }
152
153 if (!pVM->cpum.s.CPUFeatures.edx.u1MMX)
154 {
155 Log(("The CPU doesn't support MMX!\n"));
156 return VERR_UNSUPPORTED_CPU;
157 }
158 if (!pVM->cpum.s.CPUFeatures.edx.u1TSC)
159 {
160 Log(("The CPU doesn't support TSC!\n"));
161 return VERR_UNSUPPORTED_CPU;
162 }
163 /* Bogus on AMD? */
164 if (!pVM->cpum.s.CPUFeatures.edx.u1SEP)
165 Log(("The CPU doesn't support SYSENTER/SYSEXIT!\n"));
166
167 /*
168 * Setup hypervisor startup values.
169 */
170
171 /*
172 * Register saved state data item.
173 */
174 int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
175 NULL, cpumR3Save, NULL,
176 NULL, cpumR3Load, NULL);
177 if (VBOX_FAILURE(rc))
178 return rc;
179
180 /* Query the CPU manufacturer. */
181 uint32_t uEAX, uEBX, uECX, uEDX;
182 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
183 if ( uEAX >= 1
184 && uEBX == X86_CPUID_VENDOR_AMD_EBX
185 && uECX == X86_CPUID_VENDOR_AMD_ECX
186 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
187 pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_AMD;
188 else if ( uEAX >= 1
189 && uEBX == X86_CPUID_VENDOR_INTEL_EBX
190 && uECX == X86_CPUID_VENDOR_INTEL_ECX
191 && uEDX == X86_CPUID_VENDOR_INTEL_EDX)
192 pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_INTEL;
193 else /** @todo Via */
194 pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_UNKNOWN;
195
196 /*
197 * Register info handlers.
198 */
199 DBGFR3InfoRegisterInternal(pVM, "cpum", "Displays all the cpu states.", &cpumR3InfoAll);
200 DBGFR3InfoRegisterInternal(pVM, "cpumguest", "Displays the guest cpu state.", &cpumR3InfoGuest);
201 DBGFR3InfoRegisterInternal(pVM, "cpumhyper", "Displays the hypervisor cpu state.", &cpumR3InfoHyper);
202 DBGFR3InfoRegisterInternal(pVM, "cpumhost", "Displays the host cpu state.", &cpumR3InfoHost);
203 DBGFR3InfoRegisterInternal(pVM, "cpuid", "Displays the guest cpuid leaves.", &cpumR3CpuIdInfo);
204 DBGFR3InfoRegisterInternal(pVM, "cpumguestinstr", "Displays the current guest instruction.", &cpumR3InfoGuestInstr);
205
206 /*
207 * Initialize the Guest CPU state.
208 */
209 rc = cpumR3CpuIdInit(pVM);
210 if (VBOX_FAILURE(rc))
211 return rc;
212 CPUMR3Reset(pVM);
213 return VINF_SUCCESS;
214}
215
216
217/**
218 * Initializes the emulated CPU's cpuid information.
219 *
220 * @returns VBox status code.
221 * @param pVM The VM to operate on.
222 */
223static int cpumR3CpuIdInit(PVM pVM)
224{
225 PCPUM pCPUM = &pVM->cpum.s;
226 uint32_t i;
227
228 /*
229 * Get the host CPUIDs.
230 */
231 for (i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd); i++)
232 ASMCpuId_Idx_ECX(i, 0,
233 &pCPUM->aGuestCpuIdStd[i].eax, &pCPUM->aGuestCpuIdStd[i].ebx,
234 &pCPUM->aGuestCpuIdStd[i].ecx, &pCPUM->aGuestCpuIdStd[i].edx);
235 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt); i++)
236 ASMCpuId(0x80000000 + i,
237 &pCPUM->aGuestCpuIdExt[i].eax, &pCPUM->aGuestCpuIdExt[i].ebx,
238 &pCPUM->aGuestCpuIdExt[i].ecx, &pCPUM->aGuestCpuIdExt[i].edx);
239 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
240 ASMCpuId(0xc0000000 + i,
241 &pCPUM->aGuestCpuIdCentaur[i].eax, &pCPUM->aGuestCpuIdCentaur[i].ebx,
242 &pCPUM->aGuestCpuIdCentaur[i].ecx, &pCPUM->aGuestCpuIdCentaur[i].edx);
243
244
245 /*
246 * Only report features we can support.
247 */
248 pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU
249 | X86_CPUID_FEATURE_EDX_VME
250 | X86_CPUID_FEATURE_EDX_DE
251 | X86_CPUID_FEATURE_EDX_PSE
252 | X86_CPUID_FEATURE_EDX_TSC
253 | X86_CPUID_FEATURE_EDX_MSR
254 //| X86_CPUID_FEATURE_EDX_PAE - not implemented yet.
255 | X86_CPUID_FEATURE_EDX_MCE
256 | X86_CPUID_FEATURE_EDX_CX8
257 //| X86_CPUID_FEATURE_EDX_APIC - set by the APIC device if present.
258 /** @note we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see #1757) */
259 //| X86_CPUID_FEATURE_EDX_SEP
260 | X86_CPUID_FEATURE_EDX_MTRR
261 | X86_CPUID_FEATURE_EDX_PGE
262 | X86_CPUID_FEATURE_EDX_MCA
263 | X86_CPUID_FEATURE_EDX_CMOV
264 | X86_CPUID_FEATURE_EDX_PAT
265 //| X86_CPUID_FEATURE_EDX_PSE36 - not virtualized.
266 //| X86_CPUID_FEATURE_EDX_PSN - no serial number.
267 | X86_CPUID_FEATURE_EDX_CLFSH
268 //| X86_CPUID_FEATURE_EDX_DS - no debug store.
269 //| X86_CPUID_FEATURE_EDX_ACPI - not virtualized yet.
270 | X86_CPUID_FEATURE_EDX_MMX
271 | X86_CPUID_FEATURE_EDX_FXSR
272 | X86_CPUID_FEATURE_EDX_SSE
273 | X86_CPUID_FEATURE_EDX_SSE2
274 //| X86_CPUID_FEATURE_EDX_SS - no self snoop.
275 //| X86_CPUID_FEATURE_EDX_HTT - no hyperthreading.
276 //| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
277 //| X86_CPUID_FEATURE_EDX_PBE - no pending break enable.
278 | 0;
279 pCPUM->aGuestCpuIdStd[1].ecx &= 0//X86_CPUID_FEATURE_ECX_SSE3 - not supported by the recompiler yet.
280 | X86_CPUID_FEATURE_ECX_MONITOR
281 //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
282 //| X86_CPUID_FEATURE_ECX_VMX - not virtualized.
283 //| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
284 //| X86_CPUID_FEATURE_ECX_TM2 - no thermal monitor 2.
285 //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
286 /* ECX Bit 13 - CX16 - CMPXCHG16B. */
287 //| X86_CPUID_FEATURE_ECX_CX16
288 /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
289 //| X86_CPUID_FEATURE_ECX_TPRUPDATE
290 /* ECX Bit 23 - POPCOUNT instruction. */
291 //| X86_CPUID_FEATURE_ECX_POPCOUNT
292 | 0;
293
294 /* ASSUMES that this is ALWAYS the AMD-defined feature set if present. */
295 pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU
296 | X86_CPUID_AMD_FEATURE_EDX_VME
297 | X86_CPUID_AMD_FEATURE_EDX_DE
298 | X86_CPUID_AMD_FEATURE_EDX_PSE
299 | X86_CPUID_AMD_FEATURE_EDX_TSC
300 | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
301 //| X86_CPUID_AMD_FEATURE_EDX_PAE - not implemented yet.
302 //| X86_CPUID_AMD_FEATURE_EDX_MCE - not virtualized yet.
303 | X86_CPUID_AMD_FEATURE_EDX_CX8
304 //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
305 /** @note we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see #1757) */
306 //| X86_CPUID_AMD_FEATURE_EDX_SEP
307 | X86_CPUID_AMD_FEATURE_EDX_MTRR
308 | X86_CPUID_AMD_FEATURE_EDX_PGE
309 | X86_CPUID_AMD_FEATURE_EDX_MCA
310 | X86_CPUID_AMD_FEATURE_EDX_CMOV
311 | X86_CPUID_AMD_FEATURE_EDX_PAT
312 //| X86_CPUID_AMD_FEATURE_EDX_PSE36 - not virtualized.
313 //| X86_CPUID_AMD_FEATURE_EDX_NX - not virtualized, requires PAE.
314 //| X86_CPUID_AMD_FEATURE_EDX_AXMMX
315 | X86_CPUID_AMD_FEATURE_EDX_MMX
316 | X86_CPUID_AMD_FEATURE_EDX_FXSR
317 | X86_CPUID_AMD_FEATURE_EDX_FFXSR
318 //| X86_CPUID_AMD_FEATURE_EDX_PAGE1GB
319 //| X86_CPUID_AMD_FEATURE_EDX_RDTSCP
320 //| X86_CPUID_AMD_FEATURE_EDX_LONG_MODE - not yet.
321 | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
322 | X86_CPUID_AMD_FEATURE_EDX_3DNOW
323 | 0;
324 pCPUM->aGuestCpuIdExt[1].ecx &= 0
325 //| X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF
326 //| X86_CPUID_AMD_FEATURE_ECX_CMPL
327 //| X86_CPUID_AMD_FEATURE_ECX_SVM - not virtualized.
328 //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
329 //| X86_CPUID_AMD_FEATURE_ECX_CR8L
330 //| X86_CPUID_AMD_FEATURE_ECX_ABM
331 //| X86_CPUID_AMD_FEATURE_ECX_SSE4A
332 //| X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
333 //| X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
334 //| X86_CPUID_AMD_FEATURE_ECX_OSVW
335 //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
336 //| X86_CPUID_AMD_FEATURE_ECX_WDT
337 | 0;
338
339 /*
340 * Hide HTT, multicore, SMP, whatever.
341 * (APIC-ID := 0 and #LogCpus := 0)
342 */
343 pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
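 /* Added note, not in the original source: in CPUID leaf 1, EBX bits 16-23 carry the
    logical processor count and bits 24-31 the initial APIC ID, so keeping only the low
    16 bits (brand index and CLFLUSH line size) presents a single-CPU, single-threaded
    package to the guest. */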
344
345 /* Cpuid 2:
346 * Intel: Cache and TLB information
347 * AMD: Reserved
348 * Safe to expose
349 */
350
351 /* Cpuid 3:
352 * Intel: EAX, EBX - reserved
353 * ECX, EDX - Processor Serial Number if available, otherwise reserved
354 * AMD: Reserved
355 * Safe to expose
356 */
357 if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN))
358 pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0;
359
360 /* Cpuid 4:
361 * Intel: Deterministic Cache Parameters Leaf
362 * Note: Depends on the ECX input! -> Feeling rather lazy now, so we just return 0
363 * AMD: Reserved
364 * Safe to expose, except for EAX:
365 * Bits 25-14: Maximum number of threads sharing this cache in a physical package (see note)**
366 * Bits 31-26: Maximum number of processor cores in this physical package**
367 */
368 pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0;
369 pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0;
370
371 /* Cpuid 5: Monitor/mwait Leaf
372 * Intel: ECX, EDX - reserved
373 * EAX, EBX - Smallest and largest monitor line size
374 * AMD: EDX - reserved
375 * EAX, EBX - Smallest and largest monitor line size
376 * ECX - extensions (ignored for now)
377 * Safe to expose
378 */
379 if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR))
380 pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0;
381
382 pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
383
384 /*
385 * Determine the default.
386 *
387 * Intel returns values of the highest standard function, while AMD
388 * returns zeros. VIA on the other hand seems to return nothing or
389 * perhaps some random garbage; we don't try to duplicate this behavior.
390 */
391 ASMCpuId(pCPUM->aGuestCpuIdStd[0].eax + 10,
392 &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx,
393 &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx);
394
395 /* Cpuid 0x80000005 & 0x80000006 contain information about L1, L2 & L3 cache and TLB identifiers.
396 * Safe to pass on to the guest.
397 *
398 * Intel: 0x80000005 reserved
399 * 0x80000006 L2 cache information
400 * AMD: 0x80000005 L1 cache information
401 * 0x80000006 L2/L3 cache information
402 */
403
404 /* Cpuid 0x80000007:
405 * AMD: EAX, EBX, ECX - reserved
406 * EDX: Advanced Power Management Information
407 * Intel: Reserved
408 */
409 if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007))
410 {
411 Assert(pVM->cpum.s.enmCPUVendor != CPUMCPUVENDOR_INVALID);
412
413 pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0;
414
415 if (pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
416 {
417 /* Only expose the TSC invariant capability bit to the guest. */
418 pCPUM->aGuestCpuIdExt[7].edx &= 0
419 //| X86_CPUID_AMD_ADVPOWER_EDX_TS
420 //| X86_CPUID_AMD_ADVPOWER_EDX_FID
421 //| X86_CPUID_AMD_ADVPOWER_EDX_VID
422 //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
423 //| X86_CPUID_AMD_ADVPOWER_EDX_TM
424 //| X86_CPUID_AMD_ADVPOWER_EDX_STC
425 //| X86_CPUID_AMD_ADVPOWER_EDX_MC
426 //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
427 | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
428 | 0;
429 }
430 else
431 pCPUM->aGuestCpuIdExt[7].edx = 0;
432 }
433
434 /* Cpuid 0x80000008:
435 * AMD: EBX, EDX - reserved
436 * EAX: Virtual/Physical address Size
437 * ECX: Number of cores + APICIdCoreIdSize
438 * Intel: EAX: Virtual/Physical address Size
439 * EBX, ECX, EDX - reserved
440 */
441 if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
442 {
443 /* Only expose the virtual and physical address sizes to the guest. (EAX completely) */
444 pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */
445 /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu)
446 * NC (0-7) Number of cores; 0 equals 1 core */
447 pCPUM->aGuestCpuIdExt[8].ecx = 0;
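 /* Added note, not in the original source: the EAX value passed through here encodes
    the physical address width in bits 7-0 and the virtual width in bits 15-8; e.g.
    EAX=0x00003028 would advertise 40 physical and 48 virtual address bits. */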
448 }
449
450 /*
451 * Limit the number of entries and fill the remaining with the defaults.
452 *
453 * The limits are masking off stuff about power saving and similar; this
454 * is perhaps a bit crudely done as there is probably some relatively harmless
455 * info too in these leaves (like words about having a constant TSC).
456 */
457 if (pCPUM->aGuestCpuIdStd[0].eax > 5)
458 pCPUM->aGuestCpuIdStd[0].eax = 5;
459 for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++)
460 pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef;
461
462 if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008))
463 pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008);
464 for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000)
465 ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1
466 : 0;
467 i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt); i++)
468 pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef;
469
470 /*
471 * Workaround for missing cpuid(0) patches: If we fail to patch a cpuid(0).eax, then
472 * Linux tries to determine the number of processors from (cpuid(4).eax >> 26) + 1.
473 * We currently don't support more than 1 processor.
474 */
475 pCPUM->aGuestCpuIdStd[4].eax = 0;
476
477 /*
478 * Centaur stuff (VIA).
479 *
480 * The important part here (we think) is to make sure the 0xc0000000
481 * function returns 0xc0000001. As for the features, we don't currently
482 * let on about any of those... 0xc0000002 seems to be some
483 * temperature/hz/++ stuff, include it as well (static).
484 */
485 if ( pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000)
486 && pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004))
487 {
488 pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002));
489 pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */
490 for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000);
491 i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
492 i++)
493 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
494 }
495 else
496 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
497 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
498
499
500 /*
501 * Load CPUID overrides from configuration.
502 */
503 PCPUMCPUID pCpuId = &pCPUM->aGuestCpuIdStd[0];
504 uint32_t cElements = ELEMENTS(pCPUM->aGuestCpuIdStd);
505 for (i=0;; )
506 {
507 while (cElements-- > 0)
508 {
509 PCFGMNODE pNode = CFGMR3GetChildF(CFGMR3GetRoot(pVM), "CPUM/CPUID/%RX32", i);
510 if (pNode)
511 {
512 uint32_t u32;
513 int rc = CFGMR3QueryU32(pNode, "eax", &u32);
514 if (VBOX_SUCCESS(rc))
515 pCpuId->eax = u32;
516 else
517 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
518
519 rc = CFGMR3QueryU32(pNode, "ebx", &u32);
520 if (VBOX_SUCCESS(rc))
521 pCpuId->ebx = u32;
522 else
523 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
524
525 rc = CFGMR3QueryU32(pNode, "ecx", &u32);
526 if (VBOX_SUCCESS(rc))
527 pCpuId->ecx = u32;
528 else
529 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
530
531 rc = CFGMR3QueryU32(pNode, "edx", &u32);
532 if (VBOX_SUCCESS(rc))
533 pCpuId->edx = u32;
534 else
535 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
536 }
537 pCpuId++;
538 i++;
539 }
540
541 /* next */
542 if ((i & UINT32_C(0xc0000000)) == 0)
543 {
544 pCpuId = &pCPUM->aGuestCpuIdExt[0];
545 cElements = RT_ELEMENTS(pCPUM->aGuestCpuIdExt);
546 i = UINT32_C(0x80000000);
547 }
548 else if ((i & UINT32_C(0xc0000000)) == UINT32_C(0x80000000))
549 {
550 pCpuId = &pCPUM->aGuestCpuIdCentaur[0];
551 cElements = RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
552 i = UINT32_C(0xc0000000);
553 }
554 else
555 break;
556 }
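 /* Added note, not in the original source: an override for standard leaf 1 would live
    under the CFGM node "CPUM/CPUID/1" as integer values "eax", "ebx", "ecx" and "edx";
    any register value that is absent keeps the result computed above. */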
557
558 /* Check if PAE was explicitly enabled by the user. */
559 bool fEnable = false;
560 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable);
561 if (VBOX_SUCCESS(rc) && fEnable)
562 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
563
564 /*
565 * Log the cpuid and we're good.
566 */
567 LogRel(("Logical host processors: %d, processor active mask: %08x\n",
568 RTSystemProcessorGetCount(), RTSystemProcessorGetActiveMask()));
569 LogRel(("************************* CPUID dump ************************\n"));
570 DBGFR3Info(pVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
571 LogRel(("\n"));
572 DBGFR3InfoLog(pVM, "cpuid", "verbose"); /* macro */
573 LogRel(("******************** End of CPUID dump **********************\n"));
574 return VINF_SUCCESS;
575}
576
577
578
579
580/**
581 * Applies relocations to data and code managed by this
582 * component. This function will be called at init and
583 * whenever the VMM needs to relocate itself inside the GC.
584 *
585 * The CPUM will update the addresses used by the switcher.
586 *
587 * @param pVM The VM.
588 */
589CPUMR3DECL(void) CPUMR3Relocate(PVM pVM)
590{
591 LogFlow(("CPUMR3Relocate\n"));
592 /*
593 * Switcher pointers.
594 */
595 pVM->cpum.s.pCPUMGC = VM_GUEST_ADDR(pVM, &pVM->cpum.s);
596 pVM->cpum.s.pHyperCoreGC = MMHyperCCToGC(pVM, pVM->cpum.s.pHyperCoreR3);
597 Assert(pVM->cpum.s.pHyperCoreGC != NIL_RTGCPTR);
598}
599
600
601/**
602 * Queries the pointer to the internal CPUMCTX structure
603 *
604 * @returns VBox status code.
605 * @param pVM Handle to the virtual machine.
606 * @param ppCtx Receives the CPUMCTX GC pointer when successful.
607 */
608CPUMR3DECL(int) CPUMR3QueryGuestCtxGCPtr(PVM pVM, RCPTRTYPE(PCPUMCTX) *ppCtx)
609{
610 LogFlow(("CPUMR3QueryGuestCtxGCPtr\n"));
611 /*
612 * Store the address. (Later we might check who's calling, thus the RC.)
613 */
614 *ppCtx = VM_GUEST_ADDR(pVM, &pVM->cpum.s.Guest);
615 return VINF_SUCCESS;
616}
617
618
619/**
620 * Terminates the CPUM.
621 *
622 * Termination means cleaning up and freeing all resources,
623 * the VM itself is at this point powered off or suspended.
624 *
625 * @returns VBox status code.
626 * @param pVM The VM to operate on.
627 */
628CPUMR3DECL(int) CPUMR3Term(PVM pVM)
629{
630 /** @todo ? */
631 return 0;
632}
633
634
635/**
636 * Resets the CPU.
637 *
638 * @returns VINF_SUCCESS.
639 * @param pVM The VM handle.
640 */
641CPUMR3DECL(void) CPUMR3Reset(PVM pVM)
642{
643 PCPUMCTX pCtx = &pVM->cpum.s.Guest;
644
645 /*
646 * Initialize everything to ZERO first.
647 */
648 uint32_t fUseFlags = pVM->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM;
649 memset(pCtx, 0, sizeof(*pCtx));
650 pVM->cpum.s.fUseFlags = fUseFlags;
651
652 pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET; //0x60000010
653 pCtx->eip = 0x0000fff0;
654 pCtx->edx = 0x00000600; /* P6 processor */
655 pCtx->eflags.Bits.u1Reserved0 = 1;
656
657 pCtx->cs = 0xf000;
658 pCtx->csHid.u64Base = UINT64_C(0xffff0000);
659 pCtx->csHid.u32Limit = 0x0000ffff;
660 pCtx->csHid.Attr.n.u1DescType = 1; /* code/data segment */
661 pCtx->csHid.Attr.n.u1Present = 1;
662 pCtx->csHid.Attr.n.u4Type = X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
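 /* Added note, not in the original source: with CS=0xf000, a hidden base of 0xffff0000
    and EIP=0x0000fff0, the first instruction is fetched from physical 0xfffffff0, the
    architectural x86 reset vector. */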
663
664 pCtx->dsHid.u32Limit = 0x0000ffff;
665 pCtx->dsHid.Attr.n.u1DescType = 1; /* code/data segment */
666 pCtx->dsHid.Attr.n.u1Present = 1;
667 pCtx->dsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
668
669 pCtx->esHid.u32Limit = 0x0000ffff;
670 pCtx->esHid.Attr.n.u1DescType = 1; /* code/data segment */
671 pCtx->esHid.Attr.n.u1Present = 1;
672 pCtx->esHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
673
674 pCtx->fsHid.u32Limit = 0x0000ffff;
675 pCtx->fsHid.Attr.n.u1DescType = 1; /* code/data segment */
676 pCtx->fsHid.Attr.n.u1Present = 1;
677 pCtx->fsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
678
679 pCtx->gsHid.u32Limit = 0x0000ffff;
680 pCtx->gsHid.Attr.n.u1DescType = 1; /* code/data segment */
681 pCtx->gsHid.Attr.n.u1Present = 1;
682 pCtx->gsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
683
684 pCtx->ssHid.u32Limit = 0x0000ffff;
685 pCtx->ssHid.Attr.n.u1Present = 1;
686 pCtx->ssHid.Attr.n.u1DescType = 1; /* code/data segment */
687 pCtx->ssHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
688
689 pCtx->idtr.cbIdt = 0xffff;
690 pCtx->gdtr.cbGdt = 0xffff;
691
692 pCtx->ldtrHid.u32Limit = 0xffff;
693 pCtx->ldtrHid.Attr.n.u1Present = 1;
694 pCtx->ldtrHid.Attr.n.u4Type = X86_SEL_TYPE_SYS_LDT;
695
696 pCtx->trHid.u32Limit = 0xffff;
697 pCtx->trHid.Attr.n.u1Present = 1;
698 pCtx->trHid.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
699
700 pCtx->dr6 = UINT32_C(0xFFFF0FF0);
701 pCtx->dr7 = 0x400;
702
703 pCtx->fpu.FTW = 0xff; /* All tags are set, i.e. the regs are empty. */
704 pCtx->fpu.FCW = 0x37f;
705
706 /* Init PAT MSR */
707 pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */
708}
709
710
711/**
712 * Execute state save operation.
713 *
714 * @returns VBox status code.
715 * @param pVM VM Handle.
716 * @param pSSM SSM operation handle.
717 */
718static DECLCALLBACK(int) cpumR3Save(PVM pVM, PSSMHANDLE pSSM)
719{
720 /*
721 * Save.
722 */
723 SSMR3PutMem(pSSM, &pVM->cpum.s.Hyper, sizeof(pVM->cpum.s.Hyper));
724 SSMR3PutMem(pSSM, &pVM->cpum.s.Guest, sizeof(pVM->cpum.s.Guest));
725 SSMR3PutU32(pSSM, pVM->cpum.s.fUseFlags);
726 SSMR3PutU32(pSSM, pVM->cpum.s.fChanged);
727
728 SSMR3PutU32(pSSM, ELEMENTS(pVM->cpum.s.aGuestCpuIdStd));
729 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));
730
731 SSMR3PutU32(pSSM, ELEMENTS(pVM->cpum.s.aGuestCpuIdExt));
732 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
733
734 SSMR3PutU32(pSSM, ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur));
735 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
736
737 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
738
739 /* Add the cpuid for checking that the cpu is unchanged. */
740 uint32_t au32CpuId[8] = {0};
741 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
742 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
743 return SSMR3PutMem(pSSM, &au32CpuId[0], sizeof(au32CpuId));
744}
745
746
747/**
748 * Execute state load operation.
749 *
750 * @returns VBox status code.
751 * @param pVM VM Handle.
752 * @param pSSM SSM operation handle.
753 * @param u32Version Data layout version.
754 */
755static DECLCALLBACK(int) cpumR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
756{
757 /*
758 * Validate version.
759 */
760 if (u32Version != CPUM_SAVED_STATE_VERSION)
761 {
762 Log(("cpuR3Load: Invalid version u32Version=%d!\n", u32Version));
763 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
764 }
765
766 /*
767 * Restore.
768 */
769 uint32_t uCR3 = pVM->cpum.s.Hyper.cr3;
770 uint32_t uESP = pVM->cpum.s.Hyper.esp; /* see VMMR3Relocate(). */
771 SSMR3GetMem(pSSM, &pVM->cpum.s.Hyper, sizeof(pVM->cpum.s.Hyper));
772 pVM->cpum.s.Hyper.cr3 = uCR3;
773 pVM->cpum.s.Hyper.esp = uESP;
774 SSMR3GetMem(pSSM, &pVM->cpum.s.Guest, sizeof(pVM->cpum.s.Guest));
775 SSMR3GetU32(pSSM, &pVM->cpum.s.fUseFlags);
776 SSMR3GetU32(pSSM, &pVM->cpum.s.fChanged);
777
778 uint32_t cElements;
779 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
780 /* Support old saved states with a smaller standard cpuid array. */
781 if (cElements > ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
782 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
783 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdStd[0]));
784
785 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
786 if (cElements != ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
787 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
788 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
789
790 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
791 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
792 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
793 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
794
795 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
796
797 /*
798 * Check that the basic cpuid id information is unchanged.
799 */
800 uint32_t au32CpuId[8] = {0};
801 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
802 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
803 uint32_t au32CpuIdSaved[8];
804 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
805 if (VBOX_SUCCESS(rc))
806 {
807 /* Ignore APIC ID (AMD specs). */
808 au32CpuId[5] &= ~0xff000000;
809 au32CpuIdSaved[5] &= ~0xff000000;
810 /* Ignore the number of Logical CPUs (AMD specs). */
811 au32CpuId[5] &= ~0x00ff0000;
812 au32CpuIdSaved[5] &= ~0x00ff0000;
813
814 /* do the compare */
815 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
816 {
817 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
818 LogRel(("cpumR3Load: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
819 "Saved=%.*Vhxs\n"
820 "Real =%.*Vhxs\n",
821 sizeof(au32CpuIdSaved), au32CpuIdSaved,
822 sizeof(au32CpuId), au32CpuId));
823 else
824 {
825 LogRel(("cpumR3Load: CpuId mismatch!\n"
826 "Saved=%.*Vhxs\n"
827 "Real =%.*Vhxs\n",
828 sizeof(au32CpuIdSaved), au32CpuIdSaved,
829 sizeof(au32CpuId), au32CpuId));
830 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
831 }
832 }
833 }
834
835 return rc;
836}
837
838
839/**
840 * Formats the EFLAGS value into mnemonics.
841 *
842 * @param pszEFlags Where to write the mnemonics. (Assumes sufficient buffer space.)
843 * @param efl The EFLAGS value.
844 */
845static void cpumR3InfoFormatFlags(char *pszEFlags, uint32_t efl)
846{
847 /*
848 * Format the flags.
849 */
850 static struct
851 {
852 const char *pszSet; const char *pszClear; uint32_t fFlag;
853 } s_aFlags[] =
854 {
855 { "vip",NULL, X86_EFL_VIP },
856 { "vif",NULL, X86_EFL_VIF },
857 { "ac", NULL, X86_EFL_AC },
858 { "vm", NULL, X86_EFL_VM },
859 { "rf", NULL, X86_EFL_RF },
860 { "nt", NULL, X86_EFL_NT },
861 { "ov", "nv", X86_EFL_OF },
862 { "dn", "up", X86_EFL_DF },
863 { "ei", "di", X86_EFL_IF },
864 { "tf", NULL, X86_EFL_TF },
865 { "nt", "pl", X86_EFL_SF },
866 { "nz", "zr", X86_EFL_ZF },
867 { "ac", "na", X86_EFL_AF },
868 { "po", "pe", X86_EFL_PF },
869 { "cy", "nc", X86_EFL_CF },
870 };
871 char *psz = pszEFlags;
872 for (unsigned i = 0; i < ELEMENTS(s_aFlags); i++)
873 {
874 const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
875 if (pszAdd)
876 {
877 strcpy(psz, pszAdd);
878 psz += strlen(pszAdd);
879 *psz++ = ' ';
880 }
881 }
882 psz[-1] = '\0';
883}
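/* Worked example (added, not in the original source): for efl = 0x00000246, i.e. IF,
 * ZF and PF set, the table above produces "nv up ei pl zr na pe nc". */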
884
885
886/**
887 * Formats a full register dump.
888 *
889 * @param pVM VM Handle.
890 * @param pCtx The context to format.
891 * @param pCtxCore The context core to format.
892 * @param pHlp Output functions.
893 * @param enmType The dump type.
894 * @param pszPrefix Register name prefix.
895 */
896static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCCPUMCTXCORE pCtxCore, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType, const char *pszPrefix)
897{
898 /*
899 * Format the EFLAGS.
900 */
901 uint32_t efl = pCtxCore->eflags.u32;
902 char szEFlags[80];
903 cpumR3InfoFormatFlags(&szEFlags[0], efl);
904
905 /*
906 * Format the registers.
907 */
908 switch (enmType)
909 {
910 case CPUMDUMPTYPE_TERSE:
911 if (CPUMIsGuestIn64BitCode(pVM, pCtxCore))
912 {
913 pHlp->pfnPrintf(pHlp,
914 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
915 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
916 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
917 "%sr14=%016RX64 %sr15=%016RX64\n"
918 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
919 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
920 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
921 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
922 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
923 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
924 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
925 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, efl);
926 }
927 else
928 pHlp->pfnPrintf(pHlp,
929 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
930 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
931 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
932 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
933 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
934 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
935 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, efl);
936 break;
937
938 case CPUMDUMPTYPE_DEFAULT:
939 if (CPUMIsGuestIn64BitCode(pVM, pCtxCore))
940 {
941 pHlp->pfnPrintf(pHlp,
942 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
943 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
944 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
945 "%sr14=%016RX64 %sr15=%016RX64\n"
946 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
947 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
948 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%VGv:%04x %sldtr=%04x\n"
949 ,
950 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
951 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
952 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
953 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
954 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
955 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, (RTSEL)pCtx->tr, pszPrefix, efl,
956 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
957 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, (RTSEL)pCtx->ldtr);
958 }
959 else
960 pHlp->pfnPrintf(pHlp,
961 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
962 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
963 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
964 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%VGv:%04x %sldtr=%04x\n"
965 ,
966 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
967 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
968 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
969 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, (RTSEL)pCtx->tr, pszPrefix, efl,
970 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
971 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, (RTSEL)pCtx->ldtr);
972 break;
973
974 case CPUMDUMPTYPE_VERBOSE:
975 if (CPUMIsGuestIn64BitCode(pVM, pCtxCore))
976 {
977 pHlp->pfnPrintf(pHlp,
978 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
979 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
980 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
981 "%sr14=%016RX64 %sr15=%016RX64\n"
982 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
983 "%scs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
984 "%sds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
985 "%ses={%04x base=%016RX64 limit=%08x flags=%08x}\n"
986 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
987 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
988 "%sss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
989 "%scr0=%016RX64 %scr2=%016RX64 %scr3=%016RX64 %scr4=%016RX64\n"
990 "%sdr0=%016RX64 %sdr1=%016RX64 %sdr2=%016RX64 %sdr3=%016RX64\n"
991 "%sdr4=%016RX64 %sdr5=%016RX64 %sdr6=%016RX64 %sdr7=%016RX64\n"
992 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
993 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
994 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
995 "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
996 ,
997 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
998 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
999 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1000 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1001 pszPrefix, (RTSEL)pCtxCore->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u,
1002 pszPrefix, (RTSEL)pCtxCore->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u,
1003 pszPrefix, (RTSEL)pCtxCore->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u,
1004 pszPrefix, (RTSEL)pCtxCore->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u,
1005 pszPrefix, (RTSEL)pCtxCore->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u,
1006 pszPrefix, (RTSEL)pCtxCore->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u,
1007 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1008 pszPrefix, pCtx->dr0, pszPrefix, pCtx->dr1, pszPrefix, pCtx->dr2, pszPrefix, pCtx->dr3,
1009 pszPrefix, pCtx->dr4, pszPrefix, pCtx->dr5, pszPrefix, pCtx->dr6, pszPrefix, pCtx->dr7,
1010 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1011 pszPrefix, (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1012 pszPrefix, (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1013 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1014 }
1015 else
1016 pHlp->pfnPrintf(pHlp,
1017 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1018 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1019 "%scs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr0=%08RX64 %sdr1=%08RX64\n"
1020 "%sds={%04x base=%016RX64 limit=%08x flags=%08x} %sdr2=%08RX64 %sdr3=%08RX64\n"
1021 "%ses={%04x base=%016RX64 limit=%08x flags=%08x} %sdr4=%08RX64 %sdr5=%08RX64\n"
1022 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr6=%08RX64 %sdr7=%08RX64\n"
1023 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x} %scr0=%08RX64 %scr2=%08RX64\n"
1024 "%sss={%04x base=%016RX64 limit=%08x flags=%08x} %scr3=%08RX64 %scr4=%08RX64\n"
1025 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
1026 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1027 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1028 "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1029 ,
1030 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1031 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1032 pszPrefix, (RTSEL)pCtxCore->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pszPrefix, pCtx->dr0, pszPrefix, pCtx->dr1,
1033 pszPrefix, (RTSEL)pCtxCore->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pszPrefix, pCtx->dr2, pszPrefix, pCtx->dr3,
1034 pszPrefix, (RTSEL)pCtxCore->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pszPrefix, pCtx->dr4, pszPrefix, pCtx->dr5,
1035 pszPrefix, (RTSEL)pCtxCore->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pszPrefix, pCtx->dr6, pszPrefix, pCtx->dr7,
1036 pszPrefix, (RTSEL)pCtxCore->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2,
1037 pszPrefix, (RTSEL)pCtxCore->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1038 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1039 pszPrefix, (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1040 pszPrefix, (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1041 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1042
1043 pHlp->pfnPrintf(pHlp,
1044 "FPU:\n"
1045 "%sFCW=%04x %sFSW=%04x %sFTW=%02x\n"
1046 "%sres1=%02x %sFOP=%04x %sFPUIP=%08x %sCS=%04x %sRsvrd1=%04x\n"
1047 "%sFPUDP=%04x %sDS=%04x %sRsvrd2=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n"
1048 ,
1049 pszPrefix, pCtx->fpu.FCW, pszPrefix, pCtx->fpu.FSW, pszPrefix, pCtx->fpu.FTW,
1050 pszPrefix, pCtx->fpu.huh1, pszPrefix, pCtx->fpu.FOP, pszPrefix, pCtx->fpu.FPUIP, pszPrefix, pCtx->fpu.CS, pszPrefix, pCtx->fpu.Rsvrd1,
1051 pszPrefix, pCtx->fpu.FPUDP, pszPrefix, pCtx->fpu.DS, pszPrefix, pCtx->fpu.Rsrvd2,
1052 pszPrefix, pCtx->fpu.MXCSR, pszPrefix, pCtx->fpu.MXCSR_MASK);
1053
1054
1055 pHlp->pfnPrintf(pHlp,
1056 "MSR:\n"
1057 "%sEFER =%016RX64\n"
1058 "%sPAT =%016RX64\n"
1059 "%sSTAR =%016RX64\n"
1060 "%sCSTAR =%016RX64\n"
1061 "%sLSTAR =%016RX64\n"
1062 "%sSFMASK =%016RX64\n"
1063 "%sKERNELGSBASE =%016RX64\n",
1064 pszPrefix, pCtx->msrEFER,
1065 pszPrefix, pCtx->msrPAT,
1066 pszPrefix, pCtx->msrSTAR,
1067 pszPrefix, pCtx->msrCSTAR,
1068 pszPrefix, pCtx->msrLSTAR,
1069 pszPrefix, pCtx->msrSFMASK,
1070 pszPrefix, pCtx->msrKERNELGSBASE);
1071
1072 break;
1073 }
1074}
1075
1076
1077/**
1078 * Display all cpu states and any other cpum info.
1079 *
1080 * @param pVM VM Handle.
1081 * @param pHlp The info helper functions.
1082 * @param pszArgs Arguments, ignored.
1083 */
1084static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1085{
1086 cpumR3InfoGuest(pVM, pHlp, pszArgs);
1087 cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);
1088 cpumR3InfoHyper(pVM, pHlp, pszArgs);
1089 cpumR3InfoHost(pVM, pHlp, pszArgs);
1090}
1091
1092
1093/**
1094 * Parses the info argument.
1095 *
1096 * The argument starts with 'verbose', 'terse' or 'default' and then
1097 * continues with the comment string.
1098 *
1099 * @param pszArgs The pointer to the argument string.
1100 * @param penmType Where to store the dump type request.
1101 * @param ppszComment Where to store the pointer to the comment string.
1102 */
1103static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
1104{
1105 if (!pszArgs)
1106 {
1107 *penmType = CPUMDUMPTYPE_DEFAULT;
1108 *ppszComment = "";
1109 }
1110 else
1111 {
1112 if (!strncmp(pszArgs, "verbose", sizeof("verbose") - 1))
1113 {
1114 pszArgs += 7;
1115 *penmType = CPUMDUMPTYPE_VERBOSE;
1116 }
1117 else if (!strncmp(pszArgs, "terse", sizeof("terse") - 1))
1118 {
1119 pszArgs += 5;
1120 *penmType = CPUMDUMPTYPE_TERSE;
1121 }
1122 else if (!strncmp(pszArgs, "default", sizeof("default") - 1))
1123 {
1124 pszArgs += 7;
1125 *penmType = CPUMDUMPTYPE_DEFAULT;
1126 }
1127 else
1128 *penmType = CPUMDUMPTYPE_DEFAULT;
1129 *ppszComment = RTStrStripL(pszArgs);
1130 }
1131}
1132
1133
1134/**
1135 * Display the guest cpu state.
1136 *
1137 * @param pVM VM Handle.
1138 * @param pHlp The info helper functions.
1139 * @param pszArgs Arguments, ignored.
1140 */
1141static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1142{
1143 CPUMDUMPTYPE enmType;
1144 const char *pszComment;
1145 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1146 pHlp->pfnPrintf(pHlp, "Guest CPUM state: %s\n", pszComment);
1147 cpumR3InfoOne(pVM, &pVM->cpum.s.Guest, CPUMCTX2CORE(&pVM->cpum.s.Guest), pHlp, enmType, "");
1148}
1149
1150/**
1151 * Display the current guest instruction
1152 *
1153 * @param pVM VM Handle.
1154 * @param pHlp The info helper functions.
1155 * @param pszArgs Arguments, ignored.
1156 */
1157static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1158{
1159 char szInstruction[256];
1160 int rc = DBGFR3DisasInstrCurrent(pVM, szInstruction, sizeof(szInstruction));
1161 if (VBOX_SUCCESS(rc))
1162 pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
1163}
1164
1165
1166/**
1167 * Display the hypervisor cpu state.
1168 *
1169 * @param pVM VM Handle.
1170 * @param pHlp The info helper functions.
1171 * @param pszArgs Arguments, ignored.
1172 */
1173static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1174{
1175 CPUMDUMPTYPE enmType;
1176 const char *pszComment;
1177 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1178 pHlp->pfnPrintf(pHlp, "Hypervisor CPUM state: %s\n", pszComment);
1179 cpumR3InfoOne(pVM, &pVM->cpum.s.Hyper, pVM->cpum.s.pHyperCoreR3, pHlp, enmType, ".");
1180 pHlp->pfnPrintf(pHlp, "CR4OrMask=%#x CR4AndMask=%#x\n", pVM->cpum.s.CR4.OrMask, pVM->cpum.s.CR4.AndMask);
1181}
1182
1183
1184/**
1185 * Display the host cpu state.
1186 *
1187 * @param pVM VM Handle.
1188 * @param pHlp The info helper functions.
1189 * @param pszArgs Arguments, ignored.
1190 */
1191static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1192{
1193 CPUMDUMPTYPE enmType;
1194 const char *pszComment;
1195 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1196 pHlp->pfnPrintf(pHlp, "Host CPUM state: %s\n", pszComment);
1197
1198 /*
1199 * Format the EFLAGS.
1200 */
1201 PCPUMHOSTCTX pCtx = &pVM->cpum.s.Host;
1202#if HC_ARCH_BITS == 32
1203 uint32_t efl = pCtx->eflags.u32;
1204#else
1205 uint64_t efl = pCtx->rflags;
1206#endif
1207 char szEFlags[80];
1208 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1209
1210 /*
1211 * Format the registers.
1212 */
1213#if HC_ARCH_BITS == 32
1214# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
1215 if (!(pCtx->efer & MSR_K6_EFER_LMA))
1216# endif
1217 {
1218 pHlp->pfnPrintf(pHlp,
1219 "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
1220 "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
1221 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
1222 "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
1223 "dr0=%08RX64 dr1=%08RX64x dr2=%08RX64 dr3=%08RX64x dr6=%08RX64 dr7=%08RX64\n"
1224 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1225 ,
1226 /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
1227 /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
1228 (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
1229 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
1230 pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
1231 (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, (RTSEL)pCtx->ldtr,
1232 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1233 }
1234# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
1235 else
1236# endif
1237#endif
1238#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
1239 {
1240 pHlp->pfnPrintf(pHlp,
1241 "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
1242 "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
1243 "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
1244 " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
1245 "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1246 "r14=%016RX64 r15=%016RX64\n"
1247 "iopl=%d %31s\n"
1248 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
1249 "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
1250 "cr4=%016RX64 ldtr=%04x tr=%04x\n"
1251 "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64\n"
1252 "dr3=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
1253 "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
1254 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1255 "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
1256 ,
1257 /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
1258 pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
1259 /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
1260 /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
1261 pCtx->r11, pCtx->r12, pCtx->r13,
1262 pCtx->r14, pCtx->r15,
1263 X86_EFL_GET_IOPL(efl), szEFlags,
1264 (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
1265 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
1266 pCtx->cr4, pCtx->ldtr, pCtx->tr,
1267 pCtx->dr0, pCtx->dr1, pCtx->dr2,
1268 pCtx->dr3, pCtx->dr6, pCtx->dr7,
1269 pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
1270 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
1271 pCtx->FSbase, pCtx->GSbase, pCtx->efer);
1272 }
1273#endif
1274}
1275
1276
1277/**
1278 * Get L1 cache / TLB associativity.
1279 */
1280static const char *getCacheAss(unsigned u, char *pszBuf)
1281{
1282 if (u == 0)
1283 return "res0 ";
1284 if (u == 1)
1285 return "direct";
1286 if (u >= 256)
1287 return "???";
1288
1289 RTStrPrintf(pszBuf, 16, "%d way", u);
1290 return pszBuf;
1291}
1292
1293
1294/**
1295 * Get L2 cache associativity.
1296 */
1297const char *getL2CacheAss(unsigned u)
1298{
1299 switch (u)
1300 {
1301 case 0: return "off ";
1302 case 1: return "direct";
1303 case 2: return "2 way ";
1304 case 3: return "res3 ";
1305 case 4: return "4 way ";
1306 case 5: return "res5 ";
1307 case 6: return "8 way "; case 7: return "res7 ";
1308 case 8: return "16 way";
1309 case 9: return "res9 ";
1310 case 10: return "res10 ";
1311 case 11: return "res11 ";
1312 case 12: return "res12 ";
1313 case 13: return "res13 ";
1314 case 14: return "res14 ";
1315 case 15: return "fully ";
1316 default:
1317 return "????";
1318 }
1319}
1320
1321
1322/**
1323 * Display the guest CpuId leaves.
1324 *
1325 * @param pVM VM Handle.
1326 * @param pHlp The info helper functions.
1327 * @param pszArgs "terse", "default" or "verbose".
1328 */
1329static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1330{
1331 /*
1332 * Parse the argument.
1333 */
1334 unsigned iVerbosity = 1;
1335 if (pszArgs)
1336 {
1337 pszArgs = RTStrStripL(pszArgs);
1338 if (!strcmp(pszArgs, "terse"))
1339 iVerbosity--;
1340 else if (!strcmp(pszArgs, "verbose"))
1341 iVerbosity++;
1342 }
1343
1344 /*
1345 * Start cracking.
1346 */
1347 CPUMCPUID Host;
1348 CPUMCPUID Guest;
1349 unsigned cStdMax = pVM->cpum.s.aGuestCpuIdStd[0].eax;
1350
1351 pHlp->pfnPrintf(pHlp,
1352 " RAW Standard CPUIDs\n"
1353 " Function eax ebx ecx edx\n");
1354 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd); i++)
1355 {
1356 Guest = pVM->cpum.s.aGuestCpuIdStd[i];
1357 ASMCpuId_Idx_ECX(i, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1358
1359 pHlp->pfnPrintf(pHlp,
1360 "Gst: %08x %08x %08x %08x %08x%s\n"
1361 "Hst: %08x %08x %08x %08x\n",
1362 i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
1363 i <= cStdMax ? "" : "*",
1364 Host.eax, Host.ebx, Host.ecx, Host.edx);
1365 }
1366
1367 /*
1368 * If verbose, decode it.
1369 */
1370 if (iVerbosity)
1371 {
1372 Guest = pVM->cpum.s.aGuestCpuIdStd[0];
1373 pHlp->pfnPrintf(pHlp,
1374 "Name: %.04s%.04s%.04s\n"
1375 "Supports: 0-%x\n",
1376 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
1377 }
1378
1379 /*
1380 * Get Features.
1381 */
1382 bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdStd[0].ebx,
1383 pVM->cpum.s.aGuestCpuIdStd[0].ecx,
1384 pVM->cpum.s.aGuestCpuIdStd[0].edx);
1385 if (cStdMax >= 1 && iVerbosity)
1386 {
1387 Guest = pVM->cpum.s.aGuestCpuIdStd[1];
1388 uint32_t uEAX = Guest.eax;
1389
1390 pHlp->pfnPrintf(pHlp,
1391 "Family: %d \tExtended: %d \tEffective: %d\n"
1392 "Model: %d \tExtended: %d \tEffective: %d\n"
1393 "Stepping: %d\n"
1394 "APIC ID: %#04x\n"
1395 "Logical CPUs: %d\n"
1396 "CLFLUSH Size: %d\n"
1397 "Brand ID: %#04x\n",
1398 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
1399 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
1400 ASMGetCpuStepping(uEAX),
1401 (Guest.ebx >> 24) & 0xff,
1402 (Guest.ebx >> 16) & 0xff,
1403 (Guest.ebx >> 8) & 0xff,
1404 (Guest.ebx >> 0) & 0xff);
1405 if (iVerbosity == 1)
1406 {
1407 uint32_t uEDX = Guest.edx;
1408 pHlp->pfnPrintf(pHlp, "Features EDX: ");
1409 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
1410 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
1411 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
1412 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
1413 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
1414 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
1415 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
1416 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
1417 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
1418 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
1419 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
1420 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SEP");
1421 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
1422 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
1423 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
1424 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
1425 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
1426 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
1427 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " PSN");
1428 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " CLFSH");
1429 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " 20");
1430 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " DS");
1431 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ACPI");
1432 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
1433 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
1434 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " SSE");
1435 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " SSE2");
1436 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " SS");
1437 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " HTT");
1438 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " TM");
1439 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " 30");
1440 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " PBE");
1441 pHlp->pfnPrintf(pHlp, "\n");
1442
1443 uint32_t uECX = Guest.ecx;
1444 pHlp->pfnPrintf(pHlp, "Features ECX: ");
1445 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " SSE3");
1446 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " 1");
1447 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " 2");
1448 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " MONITOR");
1449 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " DS-CPL");
1450 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " VMX");
1451 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " 6");
1452 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " EST");
1453 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " TM2");
1454 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " 9");
1455 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " CNXT-ID");
1456 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " 11");
1457 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " 12");
1458 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " CX16");
1459 for (unsigned iBit = 14; iBit < 32; iBit++)
1460 if (uECX & RT_BIT(iBit))
1461 pHlp->pfnPrintf(pHlp, " %d", iBit);
1462 pHlp->pfnPrintf(pHlp, "\n");
1463 }
1464 else
1465 {
1466 ASMCpuId(1, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1467
1468 X86CPUIDFEATEDX EdxHost = *(PX86CPUIDFEATEDX)&Host.edx;
1469 X86CPUIDFEATECX EcxHost = *(PX86CPUIDFEATECX)&Host.ecx;
1470 X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.edx;
1471 X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.ecx;
1472
1473 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
1474 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", EdxGuest.u1FPU, EdxHost.u1FPU);
1475 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", EdxGuest.u1VME, EdxHost.u1VME);
1476 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", EdxGuest.u1DE, EdxHost.u1DE);
1477 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", EdxGuest.u1PSE, EdxHost.u1PSE);
1478 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", EdxGuest.u1TSC, EdxHost.u1TSC);
1479 pHlp->pfnPrintf(pHlp, "MSR - Model Specific Registers = %d (%d)\n", EdxGuest.u1MSR, EdxHost.u1MSR);
1480 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", EdxGuest.u1PAE, EdxHost.u1PAE);
1481 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", EdxGuest.u1MCE, EdxHost.u1MCE);
1482 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", EdxGuest.u1CX8, EdxHost.u1CX8);
1483 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", EdxGuest.u1APIC, EdxHost.u1APIC);
1484 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EdxGuest.u1Reserved1, EdxHost.u1Reserved1);
1485 pHlp->pfnPrintf(pHlp, "SEP - SYSENTER and SYSEXIT = %d (%d)\n", EdxGuest.u1SEP, EdxHost.u1SEP);
1486 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", EdxGuest.u1MTRR, EdxHost.u1MTRR);
1487 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", EdxGuest.u1PGE, EdxHost.u1PGE);
1488 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", EdxGuest.u1MCA, EdxHost.u1MCA);
1489 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", EdxGuest.u1CMOV, EdxHost.u1CMOV);
1490 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", EdxGuest.u1PAT, EdxHost.u1PAT);
1491 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extension    = %d (%d)\n", EdxGuest.u1PSE36, EdxHost.u1PSE36);
1492 pHlp->pfnPrintf(pHlp, "PSN - Processor Serial Number = %d (%d)\n", EdxGuest.u1PSN, EdxHost.u1PSN);
1493 pHlp->pfnPrintf(pHlp, "CLFSH - CLFLUSH Instruction. = %d (%d)\n", EdxGuest.u1CLFSH, EdxHost.u1CLFSH);
1494 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EdxGuest.u1Reserved2, EdxHost.u1Reserved2);
1495 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", EdxGuest.u1DS, EdxHost.u1DS);
1496 pHlp->pfnPrintf(pHlp, "ACPI - Thermal Mon. & Soft. Clock Ctrl.= %d (%d)\n", EdxGuest.u1ACPI, EdxHost.u1ACPI);
1497 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", EdxGuest.u1MMX, EdxHost.u1MMX);
1498 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", EdxGuest.u1FXSR, EdxHost.u1FXSR);
1499 pHlp->pfnPrintf(pHlp, "SSE - SSE Support = %d (%d)\n", EdxGuest.u1SSE, EdxHost.u1SSE);
1500 pHlp->pfnPrintf(pHlp, "SSE2 - SSE2 Support = %d (%d)\n", EdxGuest.u1SSE2, EdxHost.u1SSE2);
1501 pHlp->pfnPrintf(pHlp, "SS - Self Snoop = %d (%d)\n", EdxGuest.u1SS, EdxHost.u1SS);
1502 pHlp->pfnPrintf(pHlp, "HTT - Hyper-Threading Technology       = %d (%d)\n", EdxGuest.u1HTT, EdxHost.u1HTT);
1503 pHlp->pfnPrintf(pHlp, "TM - Thermal Monitor = %d (%d)\n", EdxGuest.u1TM, EdxHost.u1TM);
1504 pHlp->pfnPrintf(pHlp, "30 - Reserved = %d (%d)\n", EdxGuest.u1Reserved3, EdxHost.u1Reserved3);
1505 pHlp->pfnPrintf(pHlp, "PBE - Pending Break Enable = %d (%d)\n", EdxGuest.u1PBE, EdxHost.u1PBE);
1506
1507 pHlp->pfnPrintf(pHlp, "Supports SSE3                          = %d (%d)\n", EcxGuest.u1SSE3, EcxHost.u1SSE3);
1508 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EcxGuest.u2Reserved1, EcxHost.u2Reserved1);
1509 pHlp->pfnPrintf(pHlp, "Supports MONITOR/MWAIT = %d (%d)\n", EcxGuest.u1Monitor, EcxHost.u1Monitor);
1510 pHlp->pfnPrintf(pHlp, "CPL-DS - CPL Qualified Debug Store = %d (%d)\n", EcxGuest.u1CPLDS, EcxHost.u1CPLDS);
1511 pHlp->pfnPrintf(pHlp, "VMX - Virtual Machine Technology = %d (%d)\n", EcxGuest.u1VMX, EcxHost.u1VMX);
1512 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EcxGuest.u1Reserved2, EcxHost.u1Reserved2);
1513 pHlp->pfnPrintf(pHlp, "Enhanced SpeedStep Technology = %d (%d)\n", EcxGuest.u1EST, EcxHost.u1EST);
1514 pHlp->pfnPrintf(pHlp, "Thermal Monitor 2                      = %d (%d)\n", EcxGuest.u1TM2, EcxHost.u1TM2);
1515 pHlp->pfnPrintf(pHlp, "Supports Supplemental SSE3             = %d (%d)\n", EcxGuest.u1SSSE3, EcxHost.u1SSSE3);
1516 pHlp->pfnPrintf(pHlp, "L1 Context ID = %d (%d)\n", EcxGuest.u1CNTXID, EcxHost.u1CNTXID);
1517 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u2Reserved4, EcxHost.u2Reserved4);
1518 pHlp->pfnPrintf(pHlp, "CMPXCHG16B = %d (%d)\n", EcxGuest.u1CX16, EcxHost.u1CX16);
1519 pHlp->pfnPrintf(pHlp, "xTPR Update Control = %d (%d)\n", EcxGuest.u1TPRUpdate, EcxHost.u1TPRUpdate);
1520 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u17Reserved5, EcxHost.u17Reserved5);
1521 }
1522 }
1523 if (cStdMax >= 2 && iVerbosity)
1524 {
1525 /** @todo */
1526 }
1527
1528 /*
1529 * Extended.
1530 * Implemented after AMD specs.
1531 */
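 /* Note: CPUID(0x80000000).EAX holds the highest supported extended leaf
    (e.g. 0x80000008); the low word extracted below is its index relative
    to 0x80000000, matching how the leaves are stored in aGuestCpuIdExt. */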
1532 unsigned cExtMax = pVM->cpum.s.aGuestCpuIdExt[0].eax & 0xffff;
1533
1534 pHlp->pfnPrintf(pHlp,
1535 "\n"
1536 " RAW Extended CPUIDs\n"
1537 " Function eax ebx ecx edx\n");
1538 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt); i++)
1539 {
1540 Guest = pVM->cpum.s.aGuestCpuIdExt[i];
1541 ASMCpuId(0x80000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1542
1543 pHlp->pfnPrintf(pHlp,
1544 "Gst: %08x %08x %08x %08x %08x%s\n"
1545 "Hst: %08x %08x %08x %08x\n",
1546 0x80000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
1547 i <= cExtMax ? "" : "*",
1548 Host.eax, Host.ebx, Host.ecx, Host.edx);
1549 }
1550
1551 /*
1552 * Understandable output
1553 */
1554 if (iVerbosity && cExtMax >= 0)
1555 {
1556 Guest = pVM->cpum.s.aGuestCpuIdExt[0];
1557 pHlp->pfnPrintf(pHlp,
1558 "Ext Name: %.4s%.4s%.4s\n"
1559 "Ext Supports: 0x80000000-%#010x\n",
1560 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
1561 }
1562
1563 if (iVerbosity && cExtMax >= 1)
1564 {
1565 Guest = pVM->cpum.s.aGuestCpuIdExt[1];
1566 uint32_t uEAX = Guest.eax;
1567 pHlp->pfnPrintf(pHlp,
1568 "Family: %d \tExtended: %d \tEffective: %d\n"
1569 "Model: %d \tExtended: %d \tEffective: %d\n"
1570 "Stepping: %d\n"
1571 "Brand ID: %#05x\n",
1572 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
1573 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
1574 ASMGetCpuStepping(uEAX),
1575 Guest.ebx & 0xfff);
1576
1577 if (iVerbosity == 1)
1578 {
1579 uint32_t uEDX = Guest.edx;
1580 pHlp->pfnPrintf(pHlp, "Features EDX: ");
1581 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
1582 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
1583 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
1584 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
1585 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
1586 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
1587 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
1588 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
1589 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
1590 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
1591 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
1592 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SCR");
1593 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
1594 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
1595 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
1596 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
1597 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
1598 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
1599 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " 18");
1600 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " 19");
1601 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " NX");
1602 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " 21");
1603 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ExtMMX");
1604 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
1605 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
1606 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " FastFXSR");
1607 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " Page1GB");
1608 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " RDTSCP");
1609 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " 28");
1610 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " LongMode");
1611 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " Ext3DNow");
1612 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " 3DNow");
1613 pHlp->pfnPrintf(pHlp, "\n");
1614
1615 uint32_t uECX = Guest.ecx;
1616 pHlp->pfnPrintf(pHlp, "Features ECX: ");
1617 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " LAHF/SAHF");
1618 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " CMPL");
1619 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " SVM");
1620 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " ExtAPIC");
1621 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " CR8L");
1622 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " ABM");
1623 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " SSE4A");
1624 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MISALNSSE");
1625 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " 3DNOWPRF");
1626 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " OSVW");
1627 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " IBS");
1628 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SSE5");
1629 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " SKINIT");
1630 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " WDT");
1631 for (unsigned iBit = 14; iBit < 32; iBit++)
1632 if (uECX & RT_BIT(iBit))
1633 pHlp->pfnPrintf(pHlp, " %d", iBit);
1634 pHlp->pfnPrintf(pHlp, "\n");
1635 }
1636 else
1637 {
1638 ASMCpuId(0x80000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1639
1640 uint32_t uEdxGst = Guest.edx;
1641 uint32_t uEdxHst = Host.edx;
1642 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
1643 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
1644 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
1645 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
1646 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
1647 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
1648 pHlp->pfnPrintf(pHlp, "MSR - K86 Model Specific Registers = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
1649 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
1650 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
1651 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
1652 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
1653 pHlp->pfnPrintf(pHlp, "10 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
1654 pHlp->pfnPrintf(pHlp, "SEP - SYSCALL and SYSRET = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
1655 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
1656 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
1657 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
1658 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
1659 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
1660 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extension    = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
1661 pHlp->pfnPrintf(pHlp, "18 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
1662 pHlp->pfnPrintf(pHlp, "19 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
1663 pHlp->pfnPrintf(pHlp, "NX - No-Execute Page Protection = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
1664 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
1665 pHlp->pfnPrintf(pHlp, "AXMMX - AMD Extensions to MMX Instr. = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
1666 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
1667 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
1668 pHlp->pfnPrintf(pHlp, "25 - AMD fast FXSAVE and FXRSTOR Instr.= %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
1669 pHlp->pfnPrintf(pHlp, "26 - 1 GB large page support = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
1670 pHlp->pfnPrintf(pHlp, "27 - RDTSCP instruction = %d (%d)\n", !!(uEdxGst & RT_BIT(27)), !!(uEdxHst & RT_BIT(27)));
1671 pHlp->pfnPrintf(pHlp, "28 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(28)), !!(uEdxHst & RT_BIT(28)));
1672 pHlp->pfnPrintf(pHlp, "29 - AMD Long Mode = %d (%d)\n", !!(uEdxGst & RT_BIT(29)), !!(uEdxHst & RT_BIT(29)));
1673 pHlp->pfnPrintf(pHlp, "30 - AMD Extensions to 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(30)), !!(uEdxHst & RT_BIT(30)));
1674 pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(31)), !!(uEdxHst & RT_BIT(31)));
1675
1676 uint32_t uEcxGst = Guest.ecx;
1677 uint32_t uEcxHst = Host.ecx;
1678 pHlp->pfnPrintf(pHlp, "LahfSahf - LAHF/SAHF in 64-bit mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 0)), !!(uEcxHst & RT_BIT( 0)));
1679 pHlp->pfnPrintf(pHlp, "CmpLegacy - Core MP legacy mode (depr) = %d (%d)\n", !!(uEcxGst & RT_BIT( 1)), !!(uEcxHst & RT_BIT( 1)));
1680 pHlp->pfnPrintf(pHlp, "SVM - AMD VM Extensions = %d (%d)\n", !!(uEcxGst & RT_BIT( 2)), !!(uEcxHst & RT_BIT( 2)));
1681 pHlp->pfnPrintf(pHlp, "APIC registers starting at 0x400 = %d (%d)\n", !!(uEcxGst & RT_BIT( 3)), !!(uEcxHst & RT_BIT( 3)));
1682 pHlp->pfnPrintf(pHlp, "AltMovCR8 - LOCK MOV CR0 means MOV CR8 = %d (%d)\n", !!(uEcxGst & RT_BIT( 4)), !!(uEcxHst & RT_BIT( 4)));
1683 pHlp->pfnPrintf(pHlp, "Advanced bit manipulation = %d (%d)\n", !!(uEcxGst & RT_BIT( 5)), !!(uEcxHst & RT_BIT( 5)));
1684 pHlp->pfnPrintf(pHlp, "SSE4A instruction support = %d (%d)\n", !!(uEcxGst & RT_BIT( 6)), !!(uEcxHst & RT_BIT( 6)));
1685 pHlp->pfnPrintf(pHlp, "Misaligned SSE mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 7)), !!(uEcxHst & RT_BIT( 7)));
1686 pHlp->pfnPrintf(pHlp, "PREFETCH and PREFETCHW instruction = %d (%d)\n", !!(uEcxGst & RT_BIT( 8)), !!(uEcxHst & RT_BIT( 8)));
1687 pHlp->pfnPrintf(pHlp, "OS visible workaround = %d (%d)\n", !!(uEcxGst & RT_BIT( 9)), !!(uEcxHst & RT_BIT( 9)));
1688 pHlp->pfnPrintf(pHlp, "Instruction based sampling = %d (%d)\n", !!(uEcxGst & RT_BIT(10)), !!(uEcxHst & RT_BIT(10)));
1689 pHlp->pfnPrintf(pHlp, "SSE5 support = %d (%d)\n", !!(uEcxGst & RT_BIT(11)), !!(uEcxHst & RT_BIT(11)));
1690 pHlp->pfnPrintf(pHlp, "SKINIT, STGI, and DEV support = %d (%d)\n", !!(uEcxGst & RT_BIT(12)), !!(uEcxHst & RT_BIT(12)));
1691 pHlp->pfnPrintf(pHlp, "Watchdog timer support. = %d (%d)\n", !!(uEcxGst & RT_BIT(13)), !!(uEcxHst & RT_BIT(13)));
1692 pHlp->pfnPrintf(pHlp, "31:14 - Reserved = %#x (%#x)\n", uEcxGst >> 14, uEcxHst >> 14);
1693 }
1694 }
1695
1696 if (iVerbosity && cExtMax >= 2)
1697 {
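 /* Leaves 0x80000002..0x80000004 each return 16 characters of the CPU
    brand string in eax..edx; assemble up to 48 bytes plus a terminator
    for printing below. */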
1698 char szString[4*4*3+1] = {0};
1699 uint32_t *pu32 = (uint32_t *)szString;
1700 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].eax;
1701 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ebx;
1702 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ecx;
1703 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].edx;
1704 if (cExtMax >= 3)
1705 {
1706 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].eax;
1707 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ebx;
1708 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ecx;
1709 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].edx;
1710 }
1711 if (cExtMax >= 4)
1712 {
1713 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].eax;
1714 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ebx;
1715 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ecx;
1716 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].edx;
1717 }
1718 pHlp->pfnPrintf(pHlp, "Full Name: %s\n", szString);
1719 }
1720
1721 if (iVerbosity && cExtMax >= 5)
1722 {
1723 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[5].eax;
1724 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[5].ebx;
1725 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[5].ecx;
1726 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[5].edx;
1727 char sz1[32];
1728 char sz2[32];
1729
1730 pHlp->pfnPrintf(pHlp,
1731 "TLB 2/4M Instr/Uni: %s %3d entries\n"
1732 "TLB 2/4M Data: %s %3d entries\n",
1733 getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
1734 getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
1735 pHlp->pfnPrintf(pHlp,
1736 "TLB 4K Instr/Uni: %s %3d entries\n"
1737 "TLB 4K Data: %s %3d entries\n",
1738 getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
1739 getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
1740 pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
1741 "L1 Instr Cache Lines Per Tag: %d\n"
1742 "L1 Instr Cache Associativity: %s\n"
1743 "L1 Instr Cache Size: %d KB\n",
1744 (uEDX >> 0) & 0xff,
1745 (uEDX >> 8) & 0xff,
1746 getCacheAss((uEDX >> 16) & 0xff, sz1),
1747 (uEDX >> 24) & 0xff);
1748 pHlp->pfnPrintf(pHlp,
1749 "L1 Data Cache Line Size: %d bytes\n"
1750 "L1 Data Cache Lines Per Tag: %d\n"
1751 "L1 Data Cache Associativity: %s\n"
1752 "L1 Data Cache Size: %d KB\n",
1753 (uECX >> 0) & 0xff,
1754 (uECX >> 8) & 0xff,
1755 getCacheAss((uECX >> 16) & 0xff, sz1),
1756 (uECX >> 24) & 0xff);
1757 }
1758
1759 if (iVerbosity && cExtMax >= 6)
1760 {
1761 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[6].eax;
1762 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[6].ebx;
1763 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[6].edx;
1764
1765 pHlp->pfnPrintf(pHlp,
1766 "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
1767 "L2 TLB 2/4M Data: %s %4d entries\n",
1768 getL2CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
1769 getL2CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
1770 pHlp->pfnPrintf(pHlp,
1771 "L2 TLB 4K Instr/Uni: %s %4d entries\n"
1772 "L2 TLB 4K Data: %s %4d entries\n",
1773 getL2CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
1774 getL2CacheAss((uEBX >> 28) & 0xf), (uEBX >> 16) & 0xfff);
1775 pHlp->pfnPrintf(pHlp,
1776 "L2 Cache Line Size: %d bytes\n"
1777 "L2 Cache Lines Per Tag: %d\n"
1778 "L2 Cache Associativity: %s\n"
1779 "L2 Cache Size: %d KB\n",
1780 (uEDX >> 0) & 0xff,
1781 (uEDX >> 8) & 0xf,
1782 getL2CacheAss((uEDX >> 12) & 0xf),
1783 (uEDX >> 16) & 0xffff);
1784 }
1785
1786 if (iVerbosity && cExtMax >= 7)
1787 {
1788 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[7].edx;
1789
1790 pHlp->pfnPrintf(pHlp, "APM Features: ");
1791 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " TS");
1792 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " FID");
1793 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " VID");
1794 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " TTP");
1795 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TM");
1796 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " STC");
1797 for (unsigned iBit = 6; iBit < 32; iBit++)
1798 if (uEDX & RT_BIT(iBit))
1799 pHlp->pfnPrintf(pHlp, " %d", iBit);
1800 pHlp->pfnPrintf(pHlp, "\n");
1801 }
1802
1803 if (iVerbosity && cExtMax >= 8)
1804 {
1805 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[8].eax;
1806 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[8].ecx;
1807
1808 pHlp->pfnPrintf(pHlp,
1809 "Physical Address Width: %d bits\n"
1810 "Virtual Address Width: %d bits\n",
1811 (uEAX >> 0) & 0xff,
1812 (uEAX >> 8) & 0xff);
1813 pHlp->pfnPrintf(pHlp,
1814 "Physical Core Count: %d\n",
1815 (uECX >> 0) & 0xff);
1816 }
1817
1818
1819 /*
1820 * Centaur.
1821 */
1822 unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdCentaur[0].eax & 0xffff;
1823
1824 pHlp->pfnPrintf(pHlp,
1825 "\n"
1826 " RAW Centaur CPUIDs\n"
1827 " Function eax ebx ecx edx\n");
1828 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur); i++)
1829 {
1830 Guest = pVM->cpum.s.aGuestCpuIdCentaur[i];
1831 ASMCpuId(0xc0000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1832
1833 pHlp->pfnPrintf(pHlp,
1834 "Gst: %08x %08x %08x %08x %08x%s\n"
1835 "Hst: %08x %08x %08x %08x\n",
1836 0xc0000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
1837 i <= cCentaurMax ? "" : "*",
1838 Host.eax, Host.ebx, Host.ecx, Host.edx);
1839 }
1840
1841 /*
1842 * Understandable output
1843 */
1844 if (iVerbosity && cCentaurMax >= 0)
1845 {
1846 Guest = pVM->cpum.s.aGuestCpuIdCentaur[0];
1847 pHlp->pfnPrintf(pHlp,
1848 "Centaur Supports: 0xc0000000-%#010x\n",
1849 Guest.eax);
1850 }
1851
1852 if (iVerbosity && cCentaurMax >= 1)
1853 {
1854 ASMCpuId(0xc0000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1855 uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdCentaur[1].edx;
1856 uint32_t uEdxHst = Host.edx;
1857
1858 if (iVerbosity == 1)
1859 {
1860 pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
1861 if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
1862 if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
1863 if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
1864 if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
1865 if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
1866 if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
1867 if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
1868 if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
1869 /* possibly indicating MM/HE and MM/HE-E on older chips... */
1870 if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
1871 if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
1872 if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
1873 if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
1874 if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
1875 if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
1876 for (unsigned iBit = 14; iBit < 32; iBit++)
1877 if (uEdxGst & RT_BIT(iBit))
1878 pHlp->pfnPrintf(pHlp, " %d", iBit);
1879 pHlp->pfnPrintf(pHlp, "\n");
1880 }
1881 else
1882 {
1883 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
1884 pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
1885 pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
1886 pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
1887 pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
1888 pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
1889 pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
1890 pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
1891 pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
1892 /* possibly indicating MM/HE and MM/HE-E on older chips... */
1893 pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
1894 pHlp->pfnPrintf(pHlp, "ACE2-E - ACE2 enabled                  = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
1895 pHlp->pfnPrintf(pHlp, "PHE - Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
1896 pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
1897 pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
1898 pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
1899 for (unsigned iBit = 14; iBit < 32; iBit++)
1900 if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
1901 pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", iBit, !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
1902 pHlp->pfnPrintf(pHlp, "\n");
1903 }
1904 }
1905}
1906
1907
1908/**
1909 * Structure used when disassembling an instruction in DBGF.
1910 * This is used so the reader function can get the stuff it needs.
1911 */
1912typedef struct CPUMDISASSTATE
1913{
1914 /** Pointer to the CPU structure. */
1915 PDISCPUSTATE pCpu;
1916 /** The VM handle. */
1917 PVM pVM;
1918 /** Pointer to the first byte in the segment. */
1919 RTGCUINTPTR GCPtrSegBase;
1920 /** Pointer to the byte after the end of the segment. (might have wrapped!) */
1921 RTGCUINTPTR GCPtrSegEnd;
1922 /** The size of the segment minus 1. */
1923 RTGCUINTPTR cbSegLimit;
1924 /** Pointer to the current page - HC Ptr. */
1925 void const *pvPageHC;
1926 /** Pointer to the current page - GC Ptr. */
1927 RTGCPTR pvPageGC;
1928 /** The lock information that PGMPhysReleasePageMappingLock needs. */
1929 PGMPAGEMAPLOCK PageMapLock;
1930 /** Whether the PageMapLock is valid or not. */
1931 bool fLocked;
1932 /** 64 bits mode or not. */
1933 bool f64Bits;
1934} CPUMDISASSTATE, *PCPUMDISASSTATE;
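/* The reader callback below recovers this state through pCpu->apvUserData[0],
   which CPUMR3DisasmInstrCPU fills in before invoking the disassembler. */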
1935
1936
1937/**
1938 * Instruction reader.
1939 *
1940 * @returns VBox status code.
1941 * @param PtrSrc Address to read from.
1942 * In our case this is relative to the selector pointed to by the 2nd user argument of uDisCpu.
1943 * @param pu8Dst Where to store the bytes.
1944 * @param cbRead Number of bytes to read.
1945 * @param uDisCpu Pointer to the disassembler cpu state.
1946 * In this context it's always pointer to the Core of a DBGFDISASSTATE.
1947 */
1948static DECLCALLBACK(int) cpumR3DisasInstrRead(RTUINTPTR PtrSrc, uint8_t *pu8Dst, unsigned cbRead, void *uDisCpu)
1949{
1950 PDISCPUSTATE pCpu = (PDISCPUSTATE)uDisCpu;
1951 PCPUMDISASSTATE pState = (PCPUMDISASSTATE)pCpu->apvUserData[0];
1952 Assert(cbRead > 0);
1953 for (;;)
1954 {
1955 RTGCUINTPTR GCPtr = PtrSrc + pState->GCPtrSegBase;
1956
1957 /* Need to update the page translation? */
1958 if ( !pState->pvPageHC
1959 || (GCPtr >> PAGE_SHIFT) != (pState->pvPageGC >> PAGE_SHIFT))
1960 {
1961 int rc = VINF_SUCCESS;
1962
1963 /* translate the address */
1964 pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
1965 if (MMHyperIsInsideArea(pState->pVM, pState->pvPageGC))
1966 {
1967 pState->pvPageHC = MMHyperGC2HC(pState->pVM, pState->pvPageGC);
1968 if (!pState->pvPageHC)
1969 rc = VERR_INVALID_POINTER;
1970 }
1971 else
1972 {
1973 /* Release mapping lock previously acquired. */
1974 if (pState->fLocked)
1975 PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
1976 rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVM, pState->pvPageGC, &pState->pvPageHC, &pState->PageMapLock);
1977 pState->fLocked = RT_SUCCESS_NP(rc);
1978 }
1979 if (VBOX_FAILURE(rc))
1980 {
1981 pState->pvPageHC = NULL;
1982 return rc;
1983 }
1984 }
1985
1986 /* check the segment limit */
1987 if (!pState->f64Bits && PtrSrc > pState->cbSegLimit)
1988 return VERR_OUT_OF_SELECTOR_BOUNDS;
1989
1990 /* calc how much we can read */
1991 uint32_t cb = PAGE_SIZE - (GCPtr & PAGE_OFFSET_MASK);
1992 if (!pState->f64Bits)
1993 {
1994 RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
1995 if (cb > cbSeg && cbSeg)
1996 cb = cbSeg;
1997 }
1998 if (cb > cbRead)
1999 cb = cbRead;
2000
2001 /* read and advance */
2002 memcpy(pu8Dst, (char *)pState->pvPageHC + (GCPtr & PAGE_OFFSET_MASK), cb);
2003 cbRead -= cb;
2004 if (!cbRead)
2005 return VINF_SUCCESS;
2006 pu8Dst += cb;
2007 PtrSrc += cb;
2008 }
2009}
2010
2011
2012/**
2013 * Disassemble an instruction and return the information in the provided structure.
2014 *
2015 * @returns VBox status code.
2016 * @param pVM VM Handle
2017 * @param pCtx CPU context
2018 * @param GCPtrPC Program counter (relative to CS) to disassemble from.
2019 * @param pCpu Disassembly state
2020 * @param pszPrefix String prefix for logging (debug only)
2021 *
2022 */
2023CPUMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PCPUMCTX pCtx, RTGCPTR GCPtrPC, PDISCPUSTATE pCpu, const char *pszPrefix)
2024{
2025 CPUMDISASSTATE State;
2026 int rc;
2027
2028 const PGMMODE enmMode = PGMGetGuestMode(pVM);
2029 State.pCpu = pCpu;
2030 State.pvPageGC = 0;
2031 State.pvPageHC = NULL;
2032 State.pVM = pVM;
2033 State.fLocked = false;
2034 State.f64Bits = false;
2035
2036 /*
2037 * Get selector information.
2038 */
2039 if ( (pCtx->cr0 & X86_CR0_PE)
2040 && pCtx->eflags.Bits.u1VM == 0)
2041 {
2042 if (CPUMAreHiddenSelRegsValid(pVM))
2043 {
2044 State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->csHid.Attr.n.u1Long;
2045 State.GCPtrSegBase = pCtx->csHid.u64Base;
2046 State.GCPtrSegEnd = pCtx->csHid.u32Limit + 1 + (RTGCUINTPTR)pCtx->csHid.u64Base;
2047 State.cbSegLimit = pCtx->csHid.u32Limit;
2048 pCpu->mode = (State.f64Bits)
2049 ? CPUMODE_64BIT
2050 : pCtx->csHid.Attr.n.u1DefBig
2051 ? CPUMODE_32BIT
2052 : CPUMODE_16BIT;
2053 }
2054 else
2055 {
2056 SELMSELINFO SelInfo;
2057
2058 rc = SELMR3GetShadowSelectorInfo(pVM, pCtx->cs, &SelInfo);
2059 if (!VBOX_SUCCESS(rc))
2060 {
2061 AssertMsgFailed(("SELMR3GetShadowSelectorInfo failed for %04X:%VGv rc=%d\n", pCtx->cs, GCPtrPC, rc));
2062 return rc;
2063 }
2064
2065 /*
2066 * Validate the selector.
2067 */
2068 rc = SELMSelInfoValidateCS(&SelInfo, pCtx->ss);
2069 if (!VBOX_SUCCESS(rc))
2070 {
2071 AssertMsgFailed(("SELMSelInfoValidateCS failed for %04X:%VGv rc=%d\n", pCtx->cs, GCPtrPC, rc));
2072 return rc;
2073 }
2074 State.GCPtrSegBase = SelInfo.GCPtrBase;
2075 State.GCPtrSegEnd = SelInfo.cbLimit + 1 + (RTGCUINTPTR)SelInfo.GCPtrBase;
2076 State.cbSegLimit = SelInfo.cbLimit;
2077 pCpu->mode = SelInfo.Raw.Gen.u1DefBig ? CPUMODE_32BIT : CPUMODE_16BIT;
2078 }
2079 }
2080 else
2081 {
2082 /* real or V86 mode */
2083 pCpu->mode = CPUMODE_16BIT;
2084 State.GCPtrSegBase = pCtx->cs * 16;
2085 State.GCPtrSegEnd = 0xFFFFFFFF;
2086 State.cbSegLimit = 0xFFFFFFFF;
2087 }
2088
2089 /*
2090 * Disassemble the instruction.
2091 */
2092 pCpu->pfnReadBytes = cpumR3DisasInstrRead;
2093 pCpu->apvUserData[0] = &State;
2094
2095 uint32_t cbInstr;
2096#ifndef LOG_ENABLED
2097 rc = DISInstr(pCpu, GCPtrPC, 0, &cbInstr, NULL);
2098 if (VBOX_SUCCESS(rc))
2099 {
2100#else
2101 char szOutput[160];
2102 rc = DISInstr(pCpu, GCPtrPC, 0, &cbInstr, &szOutput[0]);
2103 if (VBOX_SUCCESS(rc))
2104 {
2105 /* log it */
2106 if (pszPrefix)
2107 Log(("%s: %s", pszPrefix, szOutput));
2108 else
2109 Log(("%s", szOutput));
2110#endif
2111 rc = VINF_SUCCESS;
2112 }
2113 else
2114 Log(("CPUMR3DisasmInstrCPU: DISInstr failed for %04X:%VGv rc=%Vrc\n", pCtx->cs, GCPtrPC, rc));
2115
2116 /* Release mapping lock acquired in cpumR3DisasInstrRead. */
2117 if (State.fLocked)
2118 PGMPhysReleasePageMappingLock(pVM, &State.PageMapLock);
2119
2120 return rc;
2121}
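/* Usage sketch (illustrative only; assumes the caller already has a guest
 * context pointer pCtx and the CS-relative program counter GCPtrPC to decode):
 *
 *     DISCPUSTATE Cpu;
 *     int rc = CPUMR3DisasmInstrCPU(pVM, pCtx, GCPtrPC, &Cpu, "DBG: ");
 *     if (VBOX_SUCCESS(rc))
 *         GCPtrPC += Cpu.opsize;  // advance past the decoded instruction
 */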
2122
2123#ifdef DEBUG
2124
2125/**
2126 * Disassemble an instruction and dump it to the log.
2127 *
2129 * @param pVM VM Handle
2130 * @param pCtx CPU context
2131 * @param pc GC instruction pointer
2132 * @param prefix String prefix for logging
2133 * @deprecated Use DBGFR3DisasInstrCurrentLog().
2134 *
2135 */
2136CPUMR3DECL(void) CPUMR3DisasmInstr(PVM pVM, PCPUMCTX pCtx, RTGCPTR pc, char *prefix)
2137{
2138 DISCPUSTATE cpu;
2139
2140 CPUMR3DisasmInstrCPU(pVM, pCtx, pc, &cpu, prefix);
2141}
2142
2143/**
2144 * Disassemble a number of instructions and dump them to the log.
2145 *
2147 * @param pVM VM Handle
2148 * @param pCtx CPU context
2149 * @param pc GC instruction pointer
2150 * @param prefix String prefix for logging
2151 * @param nrInstructions  Number of instructions to disassemble.
2152 *
2153 */
2154CPUMR3DECL(void) CPUMR3DisasmBlock(PVM pVM, PCPUMCTX pCtx, RTGCPTR pc, char *prefix, int nrInstructions)
2155{
2156 for (int i = 0; i < nrInstructions; i++)
2157 {
2158 DISCPUSTATE cpu;
2159
2160 CPUMR3DisasmInstrCPU(pVM, pCtx, pc, &cpu, prefix);
2161 pc += cpu.opsize;
2162 }
2163}
2164
2165#endif /* DEBUG */
2166
2167#ifdef DEBUG
2168/**
2169 * Debug helper - Saves guest context on raw mode entry (for fatal dump)
2170 *
2171 * @internal
2172 */
2173CPUMR3DECL(void) CPUMR3SaveEntryCtx(PVM pVM)
2174{
2175 pVM->cpum.s.GuestEntry = pVM->cpum.s.Guest;
2176}
2177#endif /* DEBUG */
2178
2179
2180/**
2181 * API for controlling a few of the CPU features found in CR4.
2182 *
2183 * Currently only X86_CR4_TSD is accepted as input.
2184 *
2185 * @returns VBox status code.
2186 *
2187 * @param pVM The VM handle.
2188 * @param fOr The CR4 OR mask.
2189 * @param fAnd The CR4 AND mask.
2190 */
2191CPUMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd)
2192{
2193 AssertMsgReturn(!(fOr & ~(X86_CR4_TSD)), ("%#x\n", fOr), VERR_INVALID_PARAMETER);
2194 AssertMsgReturn((fAnd & ~(X86_CR4_TSD)) == ~(X86_CR4_TSD), ("%#x\n", fAnd), VERR_INVALID_PARAMETER);
2195
2196 pVM->cpum.s.CR4.OrMask &= fAnd;
2197 pVM->cpum.s.CR4.OrMask |= fOr;
2198
2199 return VINF_SUCCESS;
2200}
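/* Usage sketch (illustrative only): force CR4.TSD on so the guest's RDTSC
 * traps outside ring 0, or clear it again without touching other bits:
 *
 *     CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~(RTHCUINTREG)0);
 *     CPUMR3SetCR4Feature(pVM, 0, ~(RTHCUINTREG)X86_CR4_TSD);
 */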
2201