VirtualBox

source: vbox/trunk/src/VBox/VMM/CPUM.cpp@ 10100

Last change on this file since 10100 was 10100, checked in by vboxsync, 16 years ago

Added missing cpuid features (all disabled)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 98.4 KB
Line 
1/* $Id: CPUM.cpp 10100 2008-07-02 12:48:07Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_cpum
23 * The CPU Monitor / Manager keeps track of all the CPU registers. It is
24 * also responsible for lazy FPU handling and some of the context loading
25 * in raw mode.
26 *
27 * There are three CPU contexts, the most important one is the guest one (GC).
28 * When running in raw-mode (RC) there is a special hyper context for the VMM
29 * that floats around inside the guest address space. When running in raw-mode
30 * or when using 64-bit guests on a 32-bit host, CPUM also maintains a host
 31 * context for saving and restoring registers across world switches. This latter
32 * is done in cooperation with the world switcher (@see pg_vmm).
33 */
34
35/*******************************************************************************
36* Header Files *
37*******************************************************************************/
38#define LOG_GROUP LOG_GROUP_CPUM
39#include <VBox/cpum.h>
40#include <VBox/cpumdis.h>
41#include <VBox/pgm.h>
42#include <VBox/mm.h>
43#include <VBox/selm.h>
44#include <VBox/dbgf.h>
45#include <VBox/patm.h>
46#include <VBox/ssm.h>
47#include "CPUMInternal.h"
48#include <VBox/vm.h>
49
50#include <VBox/param.h>
51#include <VBox/dis.h>
52#include <VBox/err.h>
53#include <VBox/log.h>
54#include <iprt/assert.h>
55#include <iprt/asm.h>
56#include <iprt/string.h>
57#include <iprt/system.h>
58
59
60/*******************************************************************************
61* Defined Constants And Macros *
62*******************************************************************************/
63/** The saved state version. */
64#define CPUM_SAVED_STATE_VERSION 8
65
66
67/*******************************************************************************
68* Structures and Typedefs *
69*******************************************************************************/
70
/**
 * What kind of cpu info dump to perform.
 */
typedef enum CPUMDUMPTYPE
{
    CPUMDUMPTYPE_TERSE,     /**< General purpose registers and segment selectors only. */
    CPUMDUMPTYPE_DEFAULT,   /**< Terse plus control registers, GDTR, LDTR and TR. */
    CPUMDUMPTYPE_VERBOSE    /**< Full dump including hidden segment register details. */

} CPUMDUMPTYPE;
/** Pointer to a cpu info dump type. */
typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;
83
84
85/*******************************************************************************
86* Internal Functions *
87*******************************************************************************/
88static int cpumR3CpuIdInit(PVM pVM);
89static DECLCALLBACK(int) cpumR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) cpumR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
92static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
93static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
94static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
95static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
96static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
97
98
99/**
100 * Initializes the CPUM.
101 *
102 * @returns VBox status code.
103 * @param pVM The VM to operate on.
104 */
105CPUMR3DECL(int) CPUMR3Init(PVM pVM)
106{
107 LogFlow(("CPUMR3Init\n"));
108
109 /*
110 * Assert alignment and sizes.
111 */
112 AssertRelease(!(RT_OFFSETOF(VM, cpum.s) & 31));
113 AssertRelease(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
114
115 /*
116 * Setup any fixed pointers and offsets.
117 */
118 pVM->cpum.s.offVM = RT_OFFSETOF(VM, cpum);
119 pVM->cpum.s.pCPUMHC = &pVM->cpum.s;
120 pVM->cpum.s.pHyperCoreR3 = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
121 pVM->cpum.s.pHyperCoreR0 = VM_R0_ADDR(pVM, CPUMCTX2CORE(&pVM->cpum.s.Hyper));
122
123 /* Hidden selector registers are invalid by default. */
124 pVM->cpum.s.fValidHiddenSelRegs = false;
125
126 /*
127 * Check that the CPU supports the minimum features we require.
128 */
129 /** @todo check the contract! */
130 if (!ASMHasCpuId())
131 {
132 Log(("The CPU doesn't support CPUID!\n"));
133 return VERR_UNSUPPORTED_CPU;
134 }
135 ASMCpuId_ECX_EDX(1, &pVM->cpum.s.CPUFeatures.ecx, &pVM->cpum.s.CPUFeatures.edx);
136
137 /* Setup the CR4 AND and OR masks used in the switcher */
138 /* Depends on the presence of FXSAVE(SSE) support on the host CPU */
139 if (!pVM->cpum.s.CPUFeatures.edx.u1FXSR)
140 {
141 Log(("The CPU doesn't support FXSAVE/FXRSTOR!\n"));
142 /* No FXSAVE implies no SSE */
143 pVM->cpum.s.CR4.AndMask = X86_CR4_PVI | X86_CR4_VME;
144 pVM->cpum.s.CR4.OrMask = 0;
145 }
146 else
147 {
148 pVM->cpum.s.CR4.AndMask = X86_CR4_OSXMMEEXCPT | X86_CR4_PVI | X86_CR4_VME;
149 pVM->cpum.s.CR4.OrMask = X86_CR4_OSFSXR;
150 }
151
152 if (!pVM->cpum.s.CPUFeatures.edx.u1MMX)
153 {
154 Log(("The CPU doesn't support MMX!\n"));
155 return VERR_UNSUPPORTED_CPU;
156 }
157 if (!pVM->cpum.s.CPUFeatures.edx.u1TSC)
158 {
159 Log(("The CPU doesn't support TSC!\n"));
160 return VERR_UNSUPPORTED_CPU;
161 }
162 /* Bogus on AMD? */
163 if (!pVM->cpum.s.CPUFeatures.edx.u1SEP)
164 Log(("The CPU doesn't support SYSENTER/SYSEXIT!\n"));
165
166 /*
167 * Setup hypervisor startup values.
168 */
169
170 /*
171 * Register saved state data item.
172 */
173 int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
174 NULL, cpumR3Save, NULL,
175 NULL, cpumR3Load, NULL);
176 if (VBOX_FAILURE(rc))
177 return rc;
178
179 /* Query the CPU manufacturer. */
180 uint32_t uEAX, uEBX, uECX, uEDX;
181 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
182 if ( uEAX >= 1
183 && uEBX == X86_CPUID_VENDOR_AMD_EBX
184 && uECX == X86_CPUID_VENDOR_AMD_ECX
185 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
186 pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_AMD;
187 else if ( uEAX >= 1
188 && uEBX == X86_CPUID_VENDOR_INTEL_EBX
189 && uECX == X86_CPUID_VENDOR_INTEL_ECX
190 && uEDX == X86_CPUID_VENDOR_INTEL_EDX)
191 pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_INTEL;
192 else /** @todo Via */
193 pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_UNKNOWN;
194
195 /*
196 * Register info handlers.
197 */
198 DBGFR3InfoRegisterInternal(pVM, "cpum", "Displays the all the cpu states.", &cpumR3InfoAll);
199 DBGFR3InfoRegisterInternal(pVM, "cpumguest", "Displays the guest cpu state.", &cpumR3InfoGuest);
200 DBGFR3InfoRegisterInternal(pVM, "cpumhyper", "Displays the hypervisor cpu state.", &cpumR3InfoHyper);
201 DBGFR3InfoRegisterInternal(pVM, "cpumhost", "Displays the host cpu state.", &cpumR3InfoHost);
202 DBGFR3InfoRegisterInternal(pVM, "cpuid", "Displays the guest cpuid leaves.", &cpumR3CpuIdInfo);
203 DBGFR3InfoRegisterInternal(pVM, "cpumguestinstr", "Displays the current guest instruction.", &cpumR3InfoGuestInstr);
204
205 /*
206 * Initialize the Guest CPU state.
207 */
208 rc = cpumR3CpuIdInit(pVM);
209 if (VBOX_FAILURE(rc))
210 return rc;
211 CPUMR3Reset(pVM);
212 return VINF_SUCCESS;
213}
214
215
216/**
217 * Initializes the emulated CPU's cpuid information.
218 *
219 * @returns VBox status code.
220 * @param pVM The VM to operate on.
221 */
222static int cpumR3CpuIdInit(PVM pVM)
223{
224 PCPUM pCPUM = &pVM->cpum.s;
225 uint32_t i;
226
227 /*
228 * Get the host CPUIDs.
229 */
230 for (i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd); i++)
231 ASMCpuId_Idx_ECX(i, 0,
232 &pCPUM->aGuestCpuIdStd[i].eax, &pCPUM->aGuestCpuIdStd[i].ebx,
233 &pCPUM->aGuestCpuIdStd[i].ecx, &pCPUM->aGuestCpuIdStd[i].edx);
234 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt); i++)
235 ASMCpuId(0x80000000 + i,
236 &pCPUM->aGuestCpuIdExt[i].eax, &pCPUM->aGuestCpuIdExt[i].ebx,
237 &pCPUM->aGuestCpuIdExt[i].ecx, &pCPUM->aGuestCpuIdExt[i].edx);
238 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
239 ASMCpuId(0xc0000000 + i,
240 &pCPUM->aGuestCpuIdCentaur[i].eax, &pCPUM->aGuestCpuIdCentaur[i].ebx,
241 &pCPUM->aGuestCpuIdCentaur[i].ecx, &pCPUM->aGuestCpuIdCentaur[i].edx);
242
243
244 /*
245 * Only report features we can support.
246 */
247 pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU
248 | X86_CPUID_FEATURE_EDX_VME
249 | X86_CPUID_FEATURE_EDX_DE
250 | X86_CPUID_FEATURE_EDX_PSE
251 | X86_CPUID_FEATURE_EDX_TSC
252 | X86_CPUID_FEATURE_EDX_MSR
253 //| X86_CPUID_FEATURE_EDX_PAE - not implemented yet.
254 | X86_CPUID_FEATURE_EDX_MCE
255 | X86_CPUID_FEATURE_EDX_CX8
256 //| X86_CPUID_FEATURE_EDX_APIC - set by the APIC device if present.
257 /** @note we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see #1757) */
258 //| X86_CPUID_FEATURE_EDX_SEP
259 //| X86_CPUID_FEATURE_EDX_MTRR - no MTRRs.
260 | X86_CPUID_FEATURE_EDX_PGE
261 //| X86_CPUID_FEATURE_EDX_MCA - not virtualized.
262 | X86_CPUID_FEATURE_EDX_CMOV
263 //| X86_CPUID_FEATURE_EDX_PAT - not virtualized.
264 //| X86_CPUID_FEATURE_EDX_PSE36 - not virtualized.
265 //| X86_CPUID_FEATURE_EDX_PSN - no serial number.
266 | X86_CPUID_FEATURE_EDX_CLFSH
267 //| X86_CPUID_FEATURE_EDX_DS - no debug store.
268 //| X86_CPUID_FEATURE_EDX_ACPI - not virtualized yet.
269 | X86_CPUID_FEATURE_EDX_MMX
270 | X86_CPUID_FEATURE_EDX_FXSR
271 | X86_CPUID_FEATURE_EDX_SSE
272 | X86_CPUID_FEATURE_EDX_SSE2
273 //| X86_CPUID_FEATURE_EDX_SS - no self snoop.
274 //| X86_CPUID_FEATURE_EDX_HTT - no hyperthreading.
275 //| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
276 //| X86_CPUID_FEATURE_EDX_PBE - no pneding break enabled.
277 | 0;
278 pCPUM->aGuestCpuIdStd[1].ecx &= 0//X86_CPUID_FEATURE_ECX_SSE3 - not supported by the recompiler yet.
279 | X86_CPUID_FEATURE_ECX_MONITOR
280 //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
281 //| X86_CPUID_FEATURE_ECX_VMX - not virtualized.
282 //| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
283 //| X86_CPUID_FEATURE_ECX_TM2 - no thermal monitor 2.
284 //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
285 /** ECX Bit 13 - CX16 - CMPXCHG16B. */
286 //| X86_CPUID_FEATURE_ECX_CX16
287 /** ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
288 //| X86_CPUID_FEATURE_ECX_TPRUPDATE
289 /** ECX Bit 23 - POPCOUNT instruction. */
290 //| X86_CPUID_FEATURE_ECX_POPCOUNT
291 | 0;
292
293 /* ASSUMES that this is ALWAYS the AMD define feature set if present. */
294 pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU
295 | X86_CPUID_AMD_FEATURE_EDX_VME
296 | X86_CPUID_AMD_FEATURE_EDX_DE
297 | X86_CPUID_AMD_FEATURE_EDX_PSE
298 | X86_CPUID_AMD_FEATURE_EDX_TSC
299 | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
300 //| X86_CPUID_AMD_FEATURE_EDX_PAE - not implemented yet.
301 //| X86_CPUID_AMD_FEATURE_EDX_MCE - not virtualized yet.
302 | X86_CPUID_AMD_FEATURE_EDX_CX8
303 //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
304 /** @note we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see #1757) */
305 //| X86_CPUID_AMD_FEATURE_EDX_SEP
306 //| X86_CPUID_AMD_FEATURE_EDX_MTRR - not virtualized.
307 | X86_CPUID_AMD_FEATURE_EDX_PGE
308 //| X86_CPUID_AMD_FEATURE_EDX_MCA - not virtualized.
309 | X86_CPUID_AMD_FEATURE_EDX_CMOV
310 | X86_CPUID_AMD_FEATURE_EDX_PAT
311 //| X86_CPUID_AMD_FEATURE_EDX_PSE36 - not virtualized.
312 //| X86_CPUID_AMD_FEATURE_EDX_NX - not virtualized, requires PAE.
313 //| X86_CPUID_AMD_FEATURE_EDX_AXMMX
314 | X86_CPUID_AMD_FEATURE_EDX_MMX
315 | X86_CPUID_AMD_FEATURE_EDX_FXSR
316 | X86_CPUID_AMD_FEATURE_EDX_FFXSR
317 //| X86_CPUID_AMD_FEATURE_EDX_PAGE1GB
318 //| X86_CPUID_AMD_FEATURE_EDX_RDTSCP
319 //| X86_CPUID_AMD_FEATURE_EDX_LONG_MODE - not yet.
320 | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
321 | X86_CPUID_AMD_FEATURE_EDX_3DNOW
322 | 0;
323 pCPUM->aGuestCpuIdExt[1].ecx &= 0
324 //| X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF
325 //| X86_CPUID_AMD_FEATURE_ECX_CMPL
326 //| X86_CPUID_AMD_FEATURE_ECX_SVM - not virtualized.
327 //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
328 //| X86_CPUID_AMD_FEATURE_ECX_CR8L
329 //| X86_CPUID_AMD_FEATURE_ECX_ABM
330 //| X86_CPUID_AMD_FEATURE_ECX_SSE4A
331 //| X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
332 //| X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
333 //| X86_CPUID_AMD_FEATURE_ECX_OSVW
334 //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
335 //| X86_CPUID_AMD_FEATURE_ECX_WDT
336 | 0;
337
338 /*
339 * Hide HTT, multicode, SMP, whatever.
340 * (APIC-ID := 0 and #LogCpus := 0)
341 */
342 pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
343
344 /*
345 * Determin the default.
346 *
347 * Intel returns values of the highest standard function, while AMD
348 * returns zeros. VIA on the other hand seems to returning nothing or
349 * perhaps some random garbage, we don't try duplicate this behavior.
350 */
351 ASMCpuId(pCPUM->aGuestCpuIdStd[0].eax + 10,
352 &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx,
353 &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx);
354
355 /*
356 * Limit it the number of entries and fill the remaining with the defaults.
357 *
358 * The limits are masking off stuff about power saving and similar, this
359 * is perhaps a bit crudely done as there is probably some relatively harmless
360 * info too in these leaves (like words about having a constant TSC).
361 */
362 if (pCPUM->aGuestCpuIdStd[0].eax > 2)
363 pCPUM->aGuestCpuIdStd[0].eax = 2;
364 for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++)
365 pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef;
366
367 if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000004))
368 pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000004);
369 for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000)
370 ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1
371 : 0;
372 i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt); i++)
373 pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef;
374
375 /*
376 * Workaround for missing cpuid(0) patches: If we miss to patch a cpuid(0).eax then
377 * Linux tries to determine the number of processors from (cpuid(4).eax >> 26) + 1.
378 * We don't support more than 1 processor.
379 */
380 pCPUM->aGuestCpuIdStd[4].eax = 0;
381
382 /*
383 * Centaur stuff (VIA).
384 *
385 * The important part here (we think) is to make sure the 0xc0000000
386 * function returns 0xc0000001. As for the features, we don't currently
387 * let on about any of those... 0xc0000002 seems to be some
388 * temperature/hz/++ stuff, include it as well (static).
389 */
390 if ( pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000)
391 && pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004))
392 {
393 pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002));
394 pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */
395 for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000);
396 i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
397 i++)
398 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
399 }
400 else
401 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
402 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
403
404
405 /*
406 * Load CPUID overrides from configuration.
407 */
408 PCPUMCPUID pCpuId = &pCPUM->aGuestCpuIdStd[0];
409 uint32_t cElements = ELEMENTS(pCPUM->aGuestCpuIdStd);
410 for (i=0;; )
411 {
412 while (cElements-- > 0)
413 {
414 PCFGMNODE pNode = CFGMR3GetChildF(CFGMR3GetRoot(pVM), "CPUM/CPUID/%RX32", i);
415 if (pNode)
416 {
417 uint32_t u32;
418 int rc = CFGMR3QueryU32(pNode, "eax", &u32);
419 if (VBOX_SUCCESS(rc))
420 pCpuId->eax = u32;
421 else
422 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
423
424 rc = CFGMR3QueryU32(pNode, "ebx", &u32);
425 if (VBOX_SUCCESS(rc))
426 pCpuId->ebx = u32;
427 else
428 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
429
430 rc = CFGMR3QueryU32(pNode, "ecx", &u32);
431 if (VBOX_SUCCESS(rc))
432 pCpuId->ecx = u32;
433 else
434 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
435
436 rc = CFGMR3QueryU32(pNode, "edx", &u32);
437 if (VBOX_SUCCESS(rc))
438 pCpuId->edx = u32;
439 else
440 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
441 }
442 pCpuId++;
443 i++;
444 }
445
446 /* next */
447 if ((i & UINT32_C(0xc0000000)) == 0)
448 {
449 pCpuId = &pCPUM->aGuestCpuIdExt[0];
450 cElements = RT_ELEMENTS(pCPUM->aGuestCpuIdExt);
451 i = UINT32_C(0x80000000);
452 }
453 else if ((i & UINT32_C(0xc0000000)) == UINT32_C(0x80000000))
454 {
455 pCpuId = &pCPUM->aGuestCpuIdCentaur[0];
456 cElements = RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
457 i = UINT32_C(0xc0000000);
458 }
459 else
460 break;
461 }
462
463 /* Check if PAE was explicitely enabled by the user. */
464 bool fEnable = false;
465 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable);
466 if (VBOX_SUCCESS(rc) && fEnable)
467 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
468
469 /*
470 * Log the cpuid and we're good.
471 */
472 LogRel(("Logical host processors: %d, processor active mask: %08x\n",
473 RTSystemProcessorGetCount(), RTSystemProcessorGetActiveMask()));
474 LogRel(("************************* CPUID dump ************************\n"));
475 DBGFR3Info(pVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
476 LogRel(("\n"));
477 DBGFR3InfoLog(pVM, "cpuid", "verbose"); /* macro */
478 LogRel(("******************** End of CPUID dump **********************\n"));
479 return VINF_SUCCESS;
480}
481
482
483
484
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM need to relocate it self inside the GC.
 *
 * The CPUM will update the addresses used by the switcher.
 *
 * @param pVM The VM.
 */
CPUMR3DECL(void) CPUMR3Relocate(PVM pVM)
{
    LogFlow(("CPUMR3Relocate\n"));
    /*
     * Switcher pointers.
     */
    /* Recompute the guest-context address of the CPUM data... */
    pVM->cpum.s.pCPUMGC = VM_GUEST_ADDR(pVM, &pVM->cpum.s);
    /* ...and translate the ring-3 hyper context core pointer to GC. */
    pVM->cpum.s.pHyperCoreGC = MMHyperCCToGC(pVM, pVM->cpum.s.pHyperCoreR3);
    Assert(pVM->cpum.s.pHyperCoreGC != NIL_RTGCPTR); /* the hyper heap mapping must exist */
}
504
505
/**
 * Queries the pointer to the internal CPUMCTX structure.
 *
 * @returns VBox status code (always VINF_SUCCESS at present).
 * @param pVM Handle to the virtual machine.
 * @param ppCtx Receives the CPUMCTX GC pointer when successful.
 */
CPUMR3DECL(int) CPUMR3QueryGuestCtxGCPtr(PVM pVM, RCPTRTYPE(PCPUMCTX) *ppCtx)
{
    LogFlow(("CPUMR3QueryGuestCtxGCPtr\n"));
    /*
     * Store the address. (Later we might check how's calling, thus the RC.)
     */
    *ppCtx = VM_GUEST_ADDR(pVM, &pVM->cpum.s.Guest);
    return VINF_SUCCESS;
}
522
523
524/**
525 * Terminates the CPUM.
526 *
527 * Termination means cleaning up and freeing all resources,
528 * the VM it self is at this point powered off or suspended.
529 *
530 * @returns VBox status code.
531 * @param pVM The VM to operate on.
532 */
533CPUMR3DECL(int) CPUMR3Term(PVM pVM)
534{
535 /** @todo ? */
536 return 0;
537}
538
539
/**
 * Resets the CPU.
 *
 * Puts the guest context into the power-on/reset state: real mode,
 * CS:IP = F000:FFF0, caches disabled, all segment limits 64KB.
 * (Original doc said "@returns VINF_SUCCESS" but the function is void.)
 *
 * @param pVM The VM handle.
 */
CPUMR3DECL(void) CPUMR3Reset(PVM pVM)
{
    PCPUMCTX pCtx = &pVM->cpum.s.Guest;

    /*
     * Initialize everything to ZERO first.
     */
    /* Preserve fUseFlags across the memset, but clear the
       "FPU used since REM" bit. */
    uint32_t fUseFlags = pVM->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM;
    memset(pCtx, 0, sizeof(*pCtx));
    pVM->cpum.s.fUseFlags = fUseFlags;

    pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET; //0x60000010
    pCtx->eip = 0x0000fff0; /* execution starts at the reset vector */
    pCtx->edx = 0x00000600; /* P6 processor */
    pCtx->eflags.Bits.u1Reserved0 = 1; /* EFLAGS bit 1 is always set */

    /* CS starts at F000 with base FFFF0000 so CS:IP hits the reset vector. */
    pCtx->cs = 0xf000;
    pCtx->csHid.u64Base = UINT64_C(0xffff0000);
    pCtx->csHid.u32Limit = 0x0000ffff;
    pCtx->csHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->csHid.Attr.n.u1Present = 1;
    pCtx->csHid.Attr.n.u4Type = X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;

    /* All data segments: null selector, zero base, 64KB limit, read/write. */
    pCtx->dsHid.u32Limit = 0x0000ffff;
    pCtx->dsHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->dsHid.Attr.n.u1Present = 1;
    pCtx->dsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;

    pCtx->esHid.u32Limit = 0x0000ffff;
    pCtx->esHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->esHid.Attr.n.u1Present = 1;
    pCtx->esHid.Attr.n.u4Type = X86_SEL_TYPE_RW;

    pCtx->fsHid.u32Limit = 0x0000ffff;
    pCtx->fsHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->fsHid.Attr.n.u1Present = 1;
    pCtx->fsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;

    pCtx->gsHid.u32Limit = 0x0000ffff;
    pCtx->gsHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->gsHid.Attr.n.u1Present = 1;
    pCtx->gsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;

    pCtx->ssHid.u32Limit = 0x0000ffff;
    pCtx->ssHid.Attr.n.u1Present = 1;
    pCtx->ssHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->ssHid.Attr.n.u4Type = X86_SEL_TYPE_RW;

    pCtx->idtr.cbIdt = 0xffff;
    pCtx->gdtr.cbGdt = 0xffff;

    pCtx->ldtrHid.u32Limit = 0xffff;
    pCtx->ldtrHid.Attr.n.u1Present = 1;
    pCtx->ldtrHid.Attr.n.u4Type = X86_SEL_TYPE_SYS_LDT;

    pCtx->trHid.u32Limit = 0xffff;
    pCtx->trHid.Attr.n.u1Present = 1;
    pCtx->trHid.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;

    /* Architectural debug register reset values. */
    pCtx->dr6 = UINT32_C(0xFFFF0FF0);
    pCtx->dr7 = 0x400;

    pCtx->fpu.FTW = 0xff; /* All tags are set, i.e. the regs are empty. */
    pCtx->fpu.FCW = 0x37f;

    /* Init PAT MSR */
    pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */
}
614
615
616/**
617 * Execute state save operation.
618 *
619 * @returns VBox status code.
620 * @param pVM VM Handle.
621 * @param pSSM SSM operation handle.
622 */
623static DECLCALLBACK(int) cpumR3Save(PVM pVM, PSSMHANDLE pSSM)
624{
625 /*
626 * Save.
627 */
628 SSMR3PutMem(pSSM, &pVM->cpum.s.Hyper, sizeof(pVM->cpum.s.Hyper));
629 SSMR3PutMem(pSSM, &pVM->cpum.s.Guest, sizeof(pVM->cpum.s.Guest));
630 SSMR3PutU32(pSSM, pVM->cpum.s.fUseFlags);
631 SSMR3PutU32(pSSM, pVM->cpum.s.fChanged);
632
633 SSMR3PutU32(pSSM, ELEMENTS(pVM->cpum.s.aGuestCpuIdStd));
634 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));
635
636 SSMR3PutU32(pSSM, ELEMENTS(pVM->cpum.s.aGuestCpuIdExt));
637 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
638
639 SSMR3PutU32(pSSM, ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur));
640 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
641
642 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
643
644 /* Add the cpuid for checking that the cpu is unchanged. */
645 uint32_t au32CpuId[8] = {0};
646 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
647 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
648 return SSMR3PutMem(pSSM, &au32CpuId[0], sizeof(au32CpuId));
649}
650
651
652/**
653 * Execute state load operation.
654 *
655 * @returns VBox status code.
656 * @param pVM VM Handle.
657 * @param pSSM SSM operation handle.
658 * @param u32Version Data layout version.
659 */
660static DECLCALLBACK(int) cpumR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
661{
662 /*
663 * Validate version.
664 */
665 if (u32Version != CPUM_SAVED_STATE_VERSION)
666 {
667 Log(("cpuR3Load: Invalid version u32Version=%d!\n", u32Version));
668 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
669 }
670
671 /*
672 * Restore.
673 */
674 uint32_t uCR3 = pVM->cpum.s.Hyper.cr3;
675 uint32_t uESP = pVM->cpum.s.Hyper.esp; /* see VMMR3Relocate(). */
676 SSMR3GetMem(pSSM, &pVM->cpum.s.Hyper, sizeof(pVM->cpum.s.Hyper));
677 pVM->cpum.s.Hyper.cr3 = uCR3;
678 pVM->cpum.s.Hyper.esp = uESP;
679 SSMR3GetMem(pSSM, &pVM->cpum.s.Guest, sizeof(pVM->cpum.s.Guest));
680 SSMR3GetU32(pSSM, &pVM->cpum.s.fUseFlags);
681 SSMR3GetU32(pSSM, &pVM->cpum.s.fChanged);
682
683 uint32_t cElements;
684 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
685 if (cElements != ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
686 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
687 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));
688
689 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
690 if (cElements != ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
691 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
692 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
693
694 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
695 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
696 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
697 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
698
699 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
700
701 /*
702 * Check that the basic cpuid id information is unchanged.
703 */
704 uint32_t au32CpuId[8] = {0};
705 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
706 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
707 uint32_t au32CpuIdSaved[8];
708 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
709 if (VBOX_SUCCESS(rc))
710 {
711 /* Ignore APIC ID (AMD specs). */
712 au32CpuId[5] &= ~0xff000000;
713 au32CpuIdSaved[5] &= ~0xff000000;
714 /* Ignore the number of Logical CPUs (AMD specs). */
715 au32CpuId[5] &= ~0x00ff0000;
716 au32CpuIdSaved[5] &= ~0x00ff0000;
717
718 /* do the compare */
719 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
720 {
721 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
722 LogRel(("cpumR3Load: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
723 "Saved=%.*Vhxs\n"
724 "Real =%.*Vhxs\n",
725 sizeof(au32CpuIdSaved), au32CpuIdSaved,
726 sizeof(au32CpuId), au32CpuId));
727 else
728 {
729 LogRel(("cpumR3Load: CpuId mismatch!\n"
730 "Saved=%.*Vhxs\n"
731 "Real =%.*Vhxs\n",
732 sizeof(au32CpuIdSaved), au32CpuIdSaved,
733 sizeof(au32CpuId), au32CpuId));
734 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
735 }
736 }
737 }
738
739 return rc;
740}
741
742
743/**
744 * Formats the EFLAGS value into mnemonics.
745 *
746 * @param pszEFlags Where to write the mnemonics. (Assumes sufficient buffer space.)
747 * @param efl The EFLAGS value.
748 */
749static void cpumR3InfoFormatFlags(char *pszEFlags, uint32_t efl)
750{
751 /*
752 * Format the flags.
753 */
754 static struct
755 {
756 const char *pszSet; const char *pszClear; uint32_t fFlag;
757 } s_aFlags[] =
758 {
759 { "vip",NULL, X86_EFL_VIP },
760 { "vif",NULL, X86_EFL_VIF },
761 { "ac", NULL, X86_EFL_AC },
762 { "vm", NULL, X86_EFL_VM },
763 { "rf", NULL, X86_EFL_RF },
764 { "nt", NULL, X86_EFL_NT },
765 { "ov", "nv", X86_EFL_OF },
766 { "dn", "up", X86_EFL_DF },
767 { "ei", "di", X86_EFL_IF },
768 { "tf", NULL, X86_EFL_TF },
769 { "nt", "pl", X86_EFL_SF },
770 { "nz", "zr", X86_EFL_ZF },
771 { "ac", "na", X86_EFL_AF },
772 { "po", "pe", X86_EFL_PF },
773 { "cy", "nc", X86_EFL_CF },
774 };
775 char *psz = pszEFlags;
776 for (unsigned i = 0; i < ELEMENTS(s_aFlags); i++)
777 {
778 const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
779 if (pszAdd)
780 {
781 strcpy(psz, pszAdd);
782 psz += strlen(pszAdd);
783 *psz++ = ' ';
784 }
785 }
786 psz[-1] = '\0';
787}
788
789
790/**
791 * Formats a full register dump.
792 *
793 * @param pVM VM Handle.
794 * @param pCtx The context to format.
795 * @param pCtxCore The context core to format.
796 * @param pHlp Output functions.
797 * @param enmType The dump type.
798 * @param pszPrefix Register name prefix.
799 */
800static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCCPUMCTXCORE pCtxCore, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType, const char *pszPrefix)
801{
802 /*
803 * Format the EFLAGS.
804 */
805 uint32_t efl = pCtxCore->eflags.u32;
806 char szEFlags[80];
807 cpumR3InfoFormatFlags(&szEFlags[0], efl);
808
809 /*
810 * Format the registers.
811 */
812 switch (enmType)
813 {
814 case CPUMDUMPTYPE_TERSE:
815 if (CPUMIsGuestIn64BitCode(pVM, pCtxCore))
816 {
817 pHlp->pfnPrintf(pHlp,
818 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
819 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
820 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
821 "%sr14=%016RX64 %sr15=%016RX64\n"
822 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
823 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
824 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
825 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
826 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
827 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
828 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
829 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, efl);
830 }
831 else
832 pHlp->pfnPrintf(pHlp,
833 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
834 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
835 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
836 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
837 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
838 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
839 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, efl);
840 break;
841
842 case CPUMDUMPTYPE_DEFAULT:
843 if (CPUMIsGuestIn64BitCode(pVM, pCtxCore))
844 {
845 pHlp->pfnPrintf(pHlp,
846 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
847 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
848 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
849 "%sr14=%016RX64 %sr15=%016RX64\n"
850 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
851 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
852 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%VGv:%04x %sldtr=%04x\n"
853 ,
854 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
855 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
856 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
857 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
858 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
859 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, (RTSEL)pCtx->tr, pszPrefix, efl,
860 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
861 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, (RTSEL)pCtx->ldtr);
862 }
863 else
864 pHlp->pfnPrintf(pHlp,
865 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
866 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
867 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
868 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%VGv:%04x %sldtr=%04x\n"
869 ,
870 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
871 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
872 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
873 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, (RTSEL)pCtx->tr, pszPrefix, efl,
874 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
875 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, (RTSEL)pCtx->ldtr);
876 break;
877
878 case CPUMDUMPTYPE_VERBOSE:
879 if (CPUMIsGuestIn64BitCode(pVM, pCtxCore))
880 {
881 pHlp->pfnPrintf(pHlp,
882 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
883 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
884 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
885 "%sr14=%016RX64 %sr15=%016RX64\n"
886 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
887 "%scs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
888 "%sds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
889 "%ses={%04x base=%016RX64 limit=%08x flags=%08x}\n"
890 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
891 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
892 "%sss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
893 "%scr0=%016RX64 %scr2=%016RX64 %scr3=%016RX64 %scr4=%016RX64\n"
894 "%sdr0=%016RX64 %sdr1=%016RX64 %sdr2=%016RX64 %sdr3=%016RX64\n"
895 "%sdr4=%016RX64 %sdr5=%016RX64 %sdr6=%016RX64 %sdr7=%016RX64\n"
896 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
897 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
898 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
899 "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
900 ,
901 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
902 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
903 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
904 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
905 pszPrefix, (RTSEL)pCtxCore->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u,
906 pszPrefix, (RTSEL)pCtxCore->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u,
907 pszPrefix, (RTSEL)pCtxCore->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u,
908 pszPrefix, (RTSEL)pCtxCore->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u,
909 pszPrefix, (RTSEL)pCtxCore->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u,
910 pszPrefix, (RTSEL)pCtxCore->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u,
911 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
912 pszPrefix, pCtx->dr0, pszPrefix, pCtx->dr1, pszPrefix, pCtx->dr2, pszPrefix, pCtx->dr3,
913 pszPrefix, pCtx->dr4, pszPrefix, pCtx->dr5, pszPrefix, pCtx->dr6, pszPrefix, pCtx->dr7,
914 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
915 pszPrefix, (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
916 pszPrefix, (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
917 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
918 }
919 else
920 pHlp->pfnPrintf(pHlp,
921 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
922 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
923 "%scs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr0=%08RX64 %sdr1=%08RX64\n"
924 "%sds={%04x base=%016RX64 limit=%08x flags=%08x} %sdr2=%08RX64 %sdr3=%08RX64\n"
925 "%ses={%04x base=%016RX64 limit=%08x flags=%08x} %sdr4=%08RX64 %sdr5=%08RX64\n"
926 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr6=%08RX64 %sdr7=%08RX64\n"
927 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x} %scr0=%08RX64 %scr2=%08RX64\n"
928 "%sss={%04x base=%016RX64 limit=%08x flags=%08x} %scr3=%08RX64 %scr4=%08RX64\n"
929 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
930 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
931 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
932 "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
933 ,
934 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
935 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
936 pszPrefix, (RTSEL)pCtxCore->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pszPrefix, pCtx->dr0, pszPrefix, pCtx->dr1,
937 pszPrefix, (RTSEL)pCtxCore->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pszPrefix, pCtx->dr2, pszPrefix, pCtx->dr3,
938 pszPrefix, (RTSEL)pCtxCore->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pszPrefix, pCtx->dr4, pszPrefix, pCtx->dr5,
939 pszPrefix, (RTSEL)pCtxCore->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pszPrefix, pCtx->dr6, pszPrefix, pCtx->dr7,
940 pszPrefix, (RTSEL)pCtxCore->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2,
941 pszPrefix, (RTSEL)pCtxCore->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
942 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
943 pszPrefix, (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
944 pszPrefix, (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
945 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
946
947 pHlp->pfnPrintf(pHlp,
948 "FPU:\n"
949 "%sFCW=%04x %sFSW=%04x %sFTW=%02x\n"
950 "%sres1=%02x %sFOP=%04x %sFPUIP=%08x %sCS=%04x %sRsvrd1=%04x\n"
951 "%sFPUDP=%04x %sDS=%04x %sRsvrd2=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n"
952 ,
953 pszPrefix, pCtx->fpu.FCW, pszPrefix, pCtx->fpu.FSW, pszPrefix, pCtx->fpu.FTW,
954 pszPrefix, pCtx->fpu.huh1, pszPrefix, pCtx->fpu.FOP, pszPrefix, pCtx->fpu.FPUIP, pszPrefix, pCtx->fpu.CS, pszPrefix, pCtx->fpu.Rsvrd1,
955 pszPrefix, pCtx->fpu.FPUDP, pszPrefix, pCtx->fpu.DS, pszPrefix, pCtx->fpu.Rsrvd2,
956 pszPrefix, pCtx->fpu.MXCSR, pszPrefix, pCtx->fpu.MXCSR_MASK);
957
958
959 pHlp->pfnPrintf(pHlp,
960 "MSR:\n"
961 "%sEFER =%016RX64\n"
962 "%sPAT =%016RX64\n"
963 "%sSTAR =%016RX64\n"
964 "%sCSTAR =%016RX64\n"
965 "%sLSTAR =%016RX64\n"
966 "%sSFMASK =%016RX64\n"
967 "%sKERNELGSBASE =%016RX64\n",
968 pszPrefix, pCtx->msrEFER,
969 pszPrefix, pCtx->msrPAT,
970 pszPrefix, pCtx->msrSTAR,
971 pszPrefix, pCtx->msrCSTAR,
972 pszPrefix, pCtx->msrLSTAR,
973 pszPrefix, pCtx->msrSFMASK,
974 pszPrefix, pCtx->msrKERNELGSBASE);
975
976 break;
977 }
978}
979
980
981/**
982 * Display all cpu states and any other cpum info.
983 *
984 * @param pVM VM Handle.
985 * @param pHlp The info helper functions.
986 * @param pszArgs Arguments, ignored.
987 */
static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    /* The order below defines the output order of the combined dump. */
    cpumR3InfoGuest(pVM, pHlp, pszArgs);        /* guest register state */
    cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);   /* disassembly of the current guest instruction */
    cpumR3InfoHyper(pVM, pHlp, pszArgs);        /* hypervisor (raw-mode) register state */
    cpumR3InfoHost(pVM, pHlp, pszArgs);         /* saved host register state */
}
995
996
997/**
998 * Parses the info argument.
999 *
1000 * The argument starts with 'verbose', 'terse' or 'default' and then
1001 * continues with the comment string.
1002 *
1003 * @param pszArgs The pointer to the argument string.
1004 * @param penmType Where to store the dump type request.
1005 * @param ppszComment Where to store the pointer to the comment string.
1006 */
1007static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
1008{
1009 if (!pszArgs)
1010 {
1011 *penmType = CPUMDUMPTYPE_DEFAULT;
1012 *ppszComment = "";
1013 }
1014 else
1015 {
1016 if (!strncmp(pszArgs, "verbose", sizeof("verbose") - 1))
1017 {
1018 pszArgs += 5;
1019 *penmType = CPUMDUMPTYPE_VERBOSE;
1020 }
1021 else if (!strncmp(pszArgs, "terse", sizeof("terse") - 1))
1022 {
1023 pszArgs += 5;
1024 *penmType = CPUMDUMPTYPE_TERSE;
1025 }
1026 else if (!strncmp(pszArgs, "default", sizeof("default") - 1))
1027 {
1028 pszArgs += 7;
1029 *penmType = CPUMDUMPTYPE_DEFAULT;
1030 }
1031 else
1032 *penmType = CPUMDUMPTYPE_DEFAULT;
1033 *ppszComment = RTStrStripL(pszArgs);
1034 }
1035}
1036
1037
1038/**
1039 * Display the guest cpu state.
1040 *
1041 * @param pVM VM Handle.
1042 * @param pHlp The info helper functions.
1043 * @param pszArgs Arguments, ignored.
1044 */
1045static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1046{
1047 CPUMDUMPTYPE enmType;
1048 const char *pszComment;
1049 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1050 pHlp->pfnPrintf(pHlp, "Guest CPUM state: %s\n", pszComment);
1051 cpumR3InfoOne(pVM, &pVM->cpum.s.Guest, CPUMCTX2CORE(&pVM->cpum.s.Guest), pHlp, enmType, "");
1052}
1053
1054/**
1055 * Display the current guest instruction
1056 *
1057 * @param pVM VM Handle.
1058 * @param pHlp The info helper functions.
1059 * @param pszArgs Arguments, ignored.
1060 */
1061static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1062{
1063 char szInstruction[256];
1064 int rc = DBGFR3DisasInstrCurrent(pVM, szInstruction, sizeof(szInstruction));
1065 if (VBOX_SUCCESS(rc))
1066 pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
1067}
1068
1069
1070/**
1071 * Display the hypervisor cpu state.
1072 *
1073 * @param pVM VM Handle.
1074 * @param pHlp The info helper functions.
1075 * @param pszArgs Arguments, ignored.
1076 */
1077static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1078{
1079 CPUMDUMPTYPE enmType;
1080 const char *pszComment;
1081 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1082 pHlp->pfnPrintf(pHlp, "Hypervisor CPUM state: %s\n", pszComment);
1083 cpumR3InfoOne(pVM, &pVM->cpum.s.Hyper, pVM->cpum.s.pHyperCoreR3, pHlp, enmType, ".");
1084 pHlp->pfnPrintf(pHlp, "CR4OrMask=%#x CR4AndMask=%#x\n", pVM->cpum.s.CR4.OrMask, pVM->cpum.s.CR4.AndMask);
1085}
1086
1087
1088/**
1089 * Display the host cpu state.
1090 *
1091 * @param pVM VM Handle.
1092 * @param pHlp The info helper functions.
1093 * @param pszArgs Arguments, ignored.
1094 */
1095static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1096{
1097 CPUMDUMPTYPE enmType;
1098 const char *pszComment;
1099 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1100 pHlp->pfnPrintf(pHlp, "Host CPUM state: %s\n", pszComment);
1101
1102 /*
1103 * Format the EFLAGS.
1104 */
1105 PCPUMHOSTCTX pCtx = &pVM->cpum.s.Host;
1106#if HC_ARCH_BITS == 32
1107 uint32_t efl = pCtx->eflags.u32;
1108#else
1109 uint64_t efl = pCtx->rflags;
1110#endif
1111 char szEFlags[80];
1112 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1113
1114 /*
1115 * Format the registers.
1116 */
1117#if HC_ARCH_BITS == 32
1118# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
1119 if (!(pCtx->efer & MSR_K6_EFER_LMA))
1120# endif
1121 {
1122 pHlp->pfnPrintf(pHlp,
1123 "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
1124 "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
1125 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
1126 "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
1127 "dr0=%08RX64 dr1=%08RX64x dr2=%08RX64 dr3=%08RX64x dr6=%08RX64 dr7=%08RX64\n"
1128 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1129 ,
1130 /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
1131 /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
1132 (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
1133 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
1134 pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
1135 (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, (RTSEL)pCtx->ldtr,
1136 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1137 }
1138# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
1139 else
1140# endif
1141#endif
1142#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
1143 {
1144 pHlp->pfnPrintf(pHlp,
1145 "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
1146 "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
1147 "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
1148 " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
1149 "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1150 "r14=%016RX64 r15=%016RX64\n"
1151 "iopl=%d %31s\n"
1152 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
1153 "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
1154 "cr4=%016RX64 cr8=%016RX64 ldtr=%04x tr=%04x\n"
1155 "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64\n"
1156 "dr3=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
1157 "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
1158 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1159 "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
1160 ,
1161 /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
1162 pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
1163 /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
1164 /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
1165 pCtx->r11, pCtx->r12, pCtx->r13,
1166 pCtx->r14, pCtx->r15,
1167 X86_EFL_GET_IOPL(efl), szEFlags,
1168 (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
1169 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
1170 pCtx->cr4, pCtx->cr8, pCtx->ldtr, pCtx->tr,
1171 pCtx->dr0, pCtx->dr1, pCtx->dr2,
1172 pCtx->dr3, pCtx->dr6, pCtx->dr7,
1173 pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
1174 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
1175 pCtx->FSbase, pCtx->GSbase, pCtx->efer);
1176 }
1177#endif
1178}
1179
1180
1181/**
1182 * Get L1 cache / TLS associativity.
1183 */
1184static const char *getCacheAss(unsigned u, char *pszBuf)
1185{
1186 if (u == 0)
1187 return "res0 ";
1188 if (u == 1)
1189 return "direct";
1190 if (u >= 256)
1191 return "???";
1192
1193 RTStrPrintf(pszBuf, 16, "%d way", u);
1194 return pszBuf;
1195}
1196
1197
1198/**
1199 * Get L2 cache soociativity.
1200 */
const char *getL2CacheAss(unsigned u)
{
    /*
     * Table of the 16 architectural L2 associativity encodings, padded to a
     * fixed 6-character width so columns line up in the dump output.
     */
    static const char * const s_apszAss[16] =
    {
        "off   ", "direct", "2 way ", "res3  ",
        "4 way ", "res5  ", "8 way ", "res7  ",
        "16 way", "res9  ", "res10 ", "res11 ",
        "res12 ", "res13 ", "res14 ", "fully "
    };
    if (u < sizeof(s_apszAss) / sizeof(s_apszAss[0]))
        return s_apszAss[u];
    return "????"; /* out-of-range encoding */
}
1224
1225
1226/**
1227 * Display the guest CpuId leaves.
1228 *
1229 * @param pVM VM Handle.
1230 * @param pHlp The info helper functions.
1231 * @param pszArgs "terse", "default" or "verbose".
1232 */
1233static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1234{
1235 /*
1236 * Parse the argument.
1237 */
1238 unsigned iVerbosity = 1;
1239 if (pszArgs)
1240 {
1241 pszArgs = RTStrStripL(pszArgs);
1242 if (!strcmp(pszArgs, "terse"))
1243 iVerbosity--;
1244 else if (!strcmp(pszArgs, "verbose"))
1245 iVerbosity++;
1246 }
1247
1248 /*
1249 * Start cracking.
1250 */
1251 CPUMCPUID Host;
1252 CPUMCPUID Guest;
1253 unsigned cStdMax = pVM->cpum.s.aGuestCpuIdStd[0].eax;
1254
1255 pHlp->pfnPrintf(pHlp,
1256 " RAW Standard CPUIDs\n"
1257 " Function eax ebx ecx edx\n");
1258 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd); i++)
1259 {
1260 Guest = pVM->cpum.s.aGuestCpuIdStd[i];
1261 ASMCpuId_Idx_ECX(i, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1262
1263 pHlp->pfnPrintf(pHlp,
1264 "Gst: %08x %08x %08x %08x %08x%s\n"
1265 "Hst: %08x %08x %08x %08x\n",
1266 i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
1267 i <= cStdMax ? "" : "*",
1268 Host.eax, Host.ebx, Host.ecx, Host.edx);
1269 }
1270
1271 /*
1272 * If verbose, decode it.
1273 */
1274 if (iVerbosity)
1275 {
1276 Guest = pVM->cpum.s.aGuestCpuIdStd[0];
1277 pHlp->pfnPrintf(pHlp,
1278 "Name: %.04s%.04s%.04s\n"
1279 "Supports: 0-%x\n",
1280 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
1281 }
1282
1283 /*
1284 * Get Features.
1285 */
1286 bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdStd[0].ebx,
1287 pVM->cpum.s.aGuestCpuIdStd[0].ecx,
1288 pVM->cpum.s.aGuestCpuIdStd[0].edx);
1289 if (cStdMax >= 1 && iVerbosity)
1290 {
1291 Guest = pVM->cpum.s.aGuestCpuIdStd[1];
1292 uint32_t uEAX = Guest.eax;
1293
1294 pHlp->pfnPrintf(pHlp,
1295 "Family: %d \tExtended: %d \tEffective: %d\n"
1296 "Model: %d \tExtended: %d \tEffective: %d\n"
1297 "Stepping: %d\n"
1298 "APIC ID: %#04x\n"
1299 "Logical CPUs: %d\n"
1300 "CLFLUSH Size: %d\n"
1301 "Brand ID: %#04x\n",
1302 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
1303 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
1304 ASMGetCpuStepping(uEAX),
1305 (Guest.ebx >> 24) & 0xff,
1306 (Guest.ebx >> 16) & 0xff,
1307 (Guest.ebx >> 8) & 0xff,
1308 (Guest.ebx >> 0) & 0xff);
1309 if (iVerbosity == 1)
1310 {
1311 uint32_t uEDX = Guest.edx;
1312 pHlp->pfnPrintf(pHlp, "Features EDX: ");
1313 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
1314 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
1315 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
1316 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
1317 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
1318 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
1319 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
1320 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
1321 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
1322 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
1323 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
1324 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SEP");
1325 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
1326 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
1327 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
1328 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
1329 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
1330 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
1331 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " PSN");
1332 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " CLFSH");
1333 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " 20");
1334 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " DS");
1335 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ACPI");
1336 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
1337 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
1338 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " SSE");
1339 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " SSE2");
1340 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " SS");
1341 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " HTT");
1342 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " TM");
1343 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " 30");
1344 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " PBE");
1345 pHlp->pfnPrintf(pHlp, "\n");
1346
1347 uint32_t uECX = Guest.ecx;
1348 pHlp->pfnPrintf(pHlp, "Features ECX: ");
1349 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " SSE3");
1350 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " 1");
1351 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " 2");
1352 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " MONITOR");
1353 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " DS-CPL");
1354 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " VMX");
1355 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " 6");
1356 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " EST");
1357 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " TM2");
1358 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " 9");
1359 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " CNXT-ID");
1360 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " 11");
1361 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " 12");
1362 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " CX16");
1363 for (unsigned iBit = 14; iBit < 32; iBit++)
1364 if (uECX & RT_BIT(iBit))
1365 pHlp->pfnPrintf(pHlp, " %d", iBit);
1366 pHlp->pfnPrintf(pHlp, "\n");
1367 }
1368 else
1369 {
1370 ASMCpuId(1, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1371
1372 X86CPUIDFEATEDX EdxHost = *(PX86CPUIDFEATEDX)&Host.edx;
1373 X86CPUIDFEATECX EcxHost = *(PX86CPUIDFEATECX)&Host.ecx;
1374 X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.edx;
1375 X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.ecx;
1376
1377 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
1378 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", EdxGuest.u1FPU, EdxHost.u1FPU);
1379 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", EdxGuest.u1VME, EdxHost.u1VME);
1380 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", EdxGuest.u1DE, EdxHost.u1DE);
1381 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", EdxGuest.u1PSE, EdxHost.u1PSE);
1382 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", EdxGuest.u1TSC, EdxHost.u1TSC);
1383 pHlp->pfnPrintf(pHlp, "MSR - Model Specific Registers = %d (%d)\n", EdxGuest.u1MSR, EdxHost.u1MSR);
1384 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", EdxGuest.u1PAE, EdxHost.u1PAE);
1385 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", EdxGuest.u1MCE, EdxHost.u1MCE);
1386 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", EdxGuest.u1CX8, EdxHost.u1CX8);
1387 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", EdxGuest.u1APIC, EdxHost.u1APIC);
1388 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EdxGuest.u1Reserved1, EdxHost.u1Reserved1);
1389 pHlp->pfnPrintf(pHlp, "SEP - SYSENTER and SYSEXIT = %d (%d)\n", EdxGuest.u1SEP, EdxHost.u1SEP);
1390 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", EdxGuest.u1MTRR, EdxHost.u1MTRR);
1391 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", EdxGuest.u1PGE, EdxHost.u1PGE);
1392 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", EdxGuest.u1MCA, EdxHost.u1MCA);
1393 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", EdxGuest.u1CMOV, EdxHost.u1CMOV);
1394 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", EdxGuest.u1PAT, EdxHost.u1PAT);
1395 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extention = %d (%d)\n", EdxGuest.u1PSE36, EdxHost.u1PSE36);
1396 pHlp->pfnPrintf(pHlp, "PSN - Processor Serial Number = %d (%d)\n", EdxGuest.u1PSN, EdxHost.u1PSN);
1397 pHlp->pfnPrintf(pHlp, "CLFSH - CLFLUSH Instruction. = %d (%d)\n", EdxGuest.u1CLFSH, EdxHost.u1CLFSH);
1398 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EdxGuest.u1Reserved2, EdxHost.u1Reserved2);
1399 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", EdxGuest.u1DS, EdxHost.u1DS);
1400 pHlp->pfnPrintf(pHlp, "ACPI - Thermal Mon. & Soft. Clock Ctrl.= %d (%d)\n", EdxGuest.u1ACPI, EdxHost.u1ACPI);
1401 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", EdxGuest.u1MMX, EdxHost.u1MMX);
1402 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", EdxGuest.u1FXSR, EdxHost.u1FXSR);
1403 pHlp->pfnPrintf(pHlp, "SSE - SSE Support = %d (%d)\n", EdxGuest.u1SSE, EdxHost.u1SSE);
1404 pHlp->pfnPrintf(pHlp, "SSE2 - SSE2 Support = %d (%d)\n", EdxGuest.u1SSE2, EdxHost.u1SSE2);
1405 pHlp->pfnPrintf(pHlp, "SS - Self Snoop = %d (%d)\n", EdxGuest.u1SS, EdxHost.u1SS);
1406 pHlp->pfnPrintf(pHlp, "HTT - Hyper-Threading Technolog = %d (%d)\n", EdxGuest.u1HTT, EdxHost.u1HTT);
1407 pHlp->pfnPrintf(pHlp, "TM - Thermal Monitor = %d (%d)\n", EdxGuest.u1TM, EdxHost.u1TM);
1408 pHlp->pfnPrintf(pHlp, "30 - Reserved = %d (%d)\n", EdxGuest.u1Reserved3, EdxHost.u1Reserved3);
1409 pHlp->pfnPrintf(pHlp, "PBE - Pending Break Enable = %d (%d)\n", EdxGuest.u1PBE, EdxHost.u1PBE);
1410
1411 pHlp->pfnPrintf(pHlp, "Supports SSE3 or not = %d (%d)\n", EcxGuest.u1SSE3, EcxHost.u1SSE3);
1412 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EcxGuest.u2Reserved1, EcxHost.u2Reserved1);
1413 pHlp->pfnPrintf(pHlp, "Supports MONITOR/MWAIT = %d (%d)\n", EcxGuest.u1Monitor, EcxHost.u1Monitor);
1414 pHlp->pfnPrintf(pHlp, "CPL-DS - CPL Qualified Debug Store = %d (%d)\n", EcxGuest.u1CPLDS, EcxHost.u1CPLDS);
1415 pHlp->pfnPrintf(pHlp, "VMX - Virtual Machine Technology = %d (%d)\n", EcxGuest.u1VMX, EcxHost.u1VMX);
1416 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EcxGuest.u1Reserved2, EcxHost.u1Reserved2);
1417 pHlp->pfnPrintf(pHlp, "Enhanced SpeedStep Technology = %d (%d)\n", EcxGuest.u1EST, EcxHost.u1EST);
1418 pHlp->pfnPrintf(pHlp, "Terminal Monitor 2 = %d (%d)\n", EcxGuest.u1TM2, EcxHost.u1TM2);
1419 pHlp->pfnPrintf(pHlp, "Supports Supplemental SSE3 or not = %d (%d)\n", EcxGuest.u1SSSE3, EcxHost.u1SSSE3);
1420 pHlp->pfnPrintf(pHlp, "L1 Context ID = %d (%d)\n", EcxGuest.u1CNTXID, EcxHost.u1CNTXID);
1421 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u2Reserved4, EcxHost.u2Reserved4);
1422 pHlp->pfnPrintf(pHlp, "CMPXCHG16B = %d (%d)\n", EcxGuest.u1CX16, EcxHost.u1CX16);
1423 pHlp->pfnPrintf(pHlp, "xTPR Update Control = %d (%d)\n", EcxGuest.u1TPRUpdate, EcxHost.u1TPRUpdate);
1424 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u17Reserved5, EcxHost.u17Reserved5);
1425 }
1426 }
1427 if (cStdMax >= 2 && iVerbosity)
1428 {
1429 /** @todo */
1430 }
1431
1432 /*
1433 * Extended.
1434 * Implemented after AMD specs.
1435 */
1436 unsigned cExtMax = pVM->cpum.s.aGuestCpuIdExt[0].eax & 0xffff;
1437
1438 pHlp->pfnPrintf(pHlp,
1439 "\n"
1440 " RAW Extended CPUIDs\n"
1441 " Function eax ebx ecx edx\n");
1442 for (unsigned i = 0; i < ELEMENTS(pVM->cpum.s.aGuestCpuIdExt); i++)
1443 {
1444 Guest = pVM->cpum.s.aGuestCpuIdExt[i];
1445 ASMCpuId(0x80000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1446
1447 pHlp->pfnPrintf(pHlp,
1448 "Gst: %08x %08x %08x %08x %08x%s\n"
1449 "Hst: %08x %08x %08x %08x\n",
1450 0x80000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
1451 i <= cExtMax ? "" : "*",
1452 Host.eax, Host.ebx, Host.ecx, Host.edx);
1453 }
1454
1455 /*
1456 * Understandable output
1457 */
1458 if (iVerbosity && cExtMax >= 0)
1459 {
1460 Guest = pVM->cpum.s.aGuestCpuIdExt[0];
1461 pHlp->pfnPrintf(pHlp,
1462 "Ext Name: %.4s%.4s%.4s\n"
1463 "Ext Supports: 0x80000000-%#010x\n",
1464 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
1465 }
1466
1467 if (iVerbosity && cExtMax >= 1)
1468 {
1469 Guest = pVM->cpum.s.aGuestCpuIdExt[1];
1470 uint32_t uEAX = Guest.eax;
1471 pHlp->pfnPrintf(pHlp,
1472 "Family: %d \tExtended: %d \tEffective: %d\n"
1473 "Model: %d \tExtended: %d \tEffective: %d\n"
1474 "Stepping: %d\n"
1475 "Brand ID: %#05x\n",
1476 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
1477 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
1478 ASMGetCpuStepping(uEAX),
1479 Guest.ebx & 0xfff);
1480
1481 if (iVerbosity == 1)
1482 {
1483 uint32_t uEDX = Guest.edx;
1484 pHlp->pfnPrintf(pHlp, "Features EDX: ");
1485 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
1486 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
1487 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
1488 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
1489 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
1490 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
1491 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
1492 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
1493 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
1494 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
1495 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
1496 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SCR");
1497 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
1498 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
1499 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
1500 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
1501 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
1502 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
1503 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " 18");
1504 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " 19");
1505 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " NX");
1506 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " 21");
1507 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ExtMMX");
1508 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
1509 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
1510 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " FastFXSR");
1511 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " Page1GB");
1512 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " RDTSCP");
1513 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " 28");
1514 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " LongMode");
1515 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " Ext3DNow");
1516 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " 3DNow");
1517 pHlp->pfnPrintf(pHlp, "\n");
1518
1519 uint32_t uECX = Guest.ecx;
1520 pHlp->pfnPrintf(pHlp, "Features ECX: ");
1521 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " LAHF/SAHF");
1522 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " CMPL");
1523 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " SVM");
1524 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " ExtAPIC");
1525 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " CR8L");
1526 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " ABM");
1527 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " SSE4A");
1528 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MISALNSSE");
1529 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " 3DNOWPRF");
1530 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " OSVW");
1531 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " IBS");
1532 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SSE5");
1533 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " SKINIT");
1534 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " WDT");
1535 for (unsigned iBit = 5; iBit < 32; iBit++)
1536 if (uECX & RT_BIT(iBit))
1537 pHlp->pfnPrintf(pHlp, " %d", iBit);
1538 pHlp->pfnPrintf(pHlp, "\n");
1539 }
1540 else
1541 {
1542 ASMCpuId(0x80000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1543
1544 uint32_t uEdxGst = Guest.edx;
1545 uint32_t uEdxHst = Host.edx;
1546 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
1547 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
1548 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
1549 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
1550 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
1551 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
1552 pHlp->pfnPrintf(pHlp, "MSR - K86 Model Specific Registers = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
1553 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
1554 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
1555 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
1556 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
1557 pHlp->pfnPrintf(pHlp, "10 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
1558 pHlp->pfnPrintf(pHlp, "SEP - SYSCALL and SYSRET = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
1559 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
1560 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
1561 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
1562 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
1563 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
1564 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extention = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
1565 pHlp->pfnPrintf(pHlp, "18 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
1566 pHlp->pfnPrintf(pHlp, "19 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
1567 pHlp->pfnPrintf(pHlp, "NX - No-Execute Page Protection = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
1568 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
1569 pHlp->pfnPrintf(pHlp, "AXMMX - AMD Extensions to MMX Instr. = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
1570 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
1571 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
1572 pHlp->pfnPrintf(pHlp, "25 - AMD fast FXSAVE and FXRSTOR Instr.= %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
1573 pHlp->pfnPrintf(pHlp, "26 - 1 GB large page support = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
1574 pHlp->pfnPrintf(pHlp, "27 - RDTSCP instruction = %d (%d)\n", !!(uEdxGst & RT_BIT(27)), !!(uEdxHst & RT_BIT(27)));
1575 pHlp->pfnPrintf(pHlp, "28 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(28)), !!(uEdxHst & RT_BIT(28)));
1576 pHlp->pfnPrintf(pHlp, "29 - AMD Long Mode = %d (%d)\n", !!(uEdxGst & RT_BIT(29)), !!(uEdxHst & RT_BIT(29)));
1577 pHlp->pfnPrintf(pHlp, "30 - AMD Extensions to 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(30)), !!(uEdxHst & RT_BIT(30)));
1578 pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(31)), !!(uEdxHst & RT_BIT(31)));
1579
1580 uint32_t uEcxGst = Guest.ecx;
1581 uint32_t uEcxHst = Host.ecx;
1582 pHlp->pfnPrintf(pHlp, "LahfSahf - LAHF/SAHF in 64-bit mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 0)), !!(uEcxHst & RT_BIT( 0)));
1583 pHlp->pfnPrintf(pHlp, "CmpLegacy - Core MP legacy mode (depr) = %d (%d)\n", !!(uEcxGst & RT_BIT( 1)), !!(uEcxHst & RT_BIT( 1)));
1584 pHlp->pfnPrintf(pHlp, "SVM - AMD VM Extensions = %d (%d)\n", !!(uEcxGst & RT_BIT( 2)), !!(uEcxHst & RT_BIT( 2)));
1585 pHlp->pfnPrintf(pHlp, "APIC registers starting at 0x400 = %d (%d)\n", !!(uEcxGst & RT_BIT( 3)), !!(uEcxHst & RT_BIT( 3)));
1586 pHlp->pfnPrintf(pHlp, "AltMovCR8 - LOCK MOV CR0 means MOV CR8 = %d (%d)\n", !!(uEcxGst & RT_BIT( 4)), !!(uEcxHst & RT_BIT( 4)));
1587 pHlp->pfnPrintf(pHlp, "Advanced bit manipulation = %d (%d)\n", !!(uEcxGst & RT_BIT( 5)), !!(uEcxHst & RT_BIT( 5)));
1588 pHlp->pfnPrintf(pHlp, "SSE4A instruction support = %d (%d)\n", !!(uEcxGst & RT_BIT( 6)), !!(uEcxHst & RT_BIT( 6)));
1589 pHlp->pfnPrintf(pHlp, "Misaligned SSE mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 7)), !!(uEcxHst & RT_BIT( 7)));
1590 pHlp->pfnPrintf(pHlp, "PREFETCH and PREFETCHW instruction = %d (%d)\n", !!(uEcxGst & RT_BIT( 8)), !!(uEcxHst & RT_BIT( 8)));
1591 pHlp->pfnPrintf(pHlp, "OS visible workaround = %d (%d)\n", !!(uEcxGst & RT_BIT( 9)), !!(uEcxHst & RT_BIT( 9)));
1592 pHlp->pfnPrintf(pHlp, "Instruction based sampling = %d (%d)\n", !!(uEcxGst & RT_BIT(10)), !!(uEcxHst & RT_BIT(10)));
1593 pHlp->pfnPrintf(pHlp, "SSE5 support = %d (%d)\n", !!(uEcxGst & RT_BIT(11)), !!(uEcxHst & RT_BIT(11)));
1594 pHlp->pfnPrintf(pHlp, "SKINIT, STGI, and DEV support = %d (%d)\n", !!(uEcxGst & RT_BIT(12)), !!(uEcxHst & RT_BIT(12)));
1595 pHlp->pfnPrintf(pHlp, "Watchdog timer support. = %d (%d)\n", !!(uEcxGst & RT_BIT(13)), !!(uEcxHst & RT_BIT(13)));
1596 pHlp->pfnPrintf(pHlp, "31:14 - Reserved = %#x (%#x)\n", uEcxGst >> 14, uEcxHst >> 14);
1597 }
1598 }
1599
1600 if (iVerbosity && cExtMax >= 2)
1601 {
1602 char szString[4*4*3+1] = {0};
1603 uint32_t *pu32 = (uint32_t *)szString;
1604 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].eax;
1605 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ebx;
1606 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ecx;
1607 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].edx;
1608 if (cExtMax >= 3)
1609 {
1610 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].eax;
1611 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ebx;
1612 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ecx;
1613 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].edx;
1614 }
1615 if (cExtMax >= 4)
1616 {
1617 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].eax;
1618 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ebx;
1619 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ecx;
1620 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].edx;
1621 }
1622 pHlp->pfnPrintf(pHlp, "Full Name: %s\n", szString);
1623 }
1624
1625 if (iVerbosity && cExtMax >= 5)
1626 {
1627 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[5].eax;
1628 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[5].ebx;
1629 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[5].ecx;
1630 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[5].edx;
1631 char sz1[32];
1632 char sz2[32];
1633
1634 pHlp->pfnPrintf(pHlp,
1635 "TLB 2/4M Instr/Uni: %s %3d entries\n"
1636 "TLB 2/4M Data: %s %3d entries\n",
1637 getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
1638 getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
1639 pHlp->pfnPrintf(pHlp,
1640 "TLB 4K Instr/Uni: %s %3d entries\n"
1641 "TLB 4K Data: %s %3d entries\n",
1642 getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
1643 getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
1644 pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
1645 "L1 Instr Cache Lines Per Tag: %d\n"
1646 "L1 Instr Cache Associativity: %s\n"
1647 "L1 Instr Cache Size: %d KB\n",
1648 (uEDX >> 0) & 0xff,
1649 (uEDX >> 8) & 0xff,
1650 getCacheAss((uEDX >> 16) & 0xff, sz1),
1651 (uEDX >> 24) & 0xff);
1652 pHlp->pfnPrintf(pHlp,
1653 "L1 Data Cache Line Size: %d bytes\n"
1654 "L1 Data Cache Lines Per Tag: %d\n"
1655 "L1 Data Cache Associativity: %s\n"
1656 "L1 Data Cache Size: %d KB\n",
1657 (uECX >> 0) & 0xff,
1658 (uECX >> 8) & 0xff,
1659 getCacheAss((uECX >> 16) & 0xff, sz1),
1660 (uECX >> 24) & 0xff);
1661 }
1662
1663 if (iVerbosity && cExtMax >= 6)
1664 {
1665 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[6].eax;
1666 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[6].ebx;
1667 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[6].edx;
1668
1669 pHlp->pfnPrintf(pHlp,
1670 "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
1671 "L2 TLB 2/4M Data: %s %4d entries\n",
1672 getL2CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
1673 getL2CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
1674 pHlp->pfnPrintf(pHlp,
1675 "L2 TLB 4K Instr/Uni: %s %4d entries\n"
1676 "L2 TLB 4K Data: %s %4d entries\n",
1677 getL2CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
1678 getL2CacheAss((uEBX >> 28) & 0xf), (uEBX >> 16) & 0xfff);
1679 pHlp->pfnPrintf(pHlp,
1680 "L2 Cache Line Size: %d bytes\n"
1681 "L2 Cache Lines Per Tag: %d\n"
1682 "L2 Cache Associativity: %s\n"
1683 "L2 Cache Size: %d KB\n",
1684 (uEDX >> 0) & 0xff,
1685 (uEDX >> 8) & 0xf,
1686 getL2CacheAss((uEDX >> 12) & 0xf),
1687 (uEDX >> 16) & 0xffff);
1688 }
1689
1690 if (iVerbosity && cExtMax >= 7)
1691 {
1692 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[7].edx;
1693
1694 pHlp->pfnPrintf(pHlp, "APM Features: ");
1695 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " TS");
1696 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " FID");
1697 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " VID");
1698 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " TTP");
1699 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TM");
1700 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " STC");
1701 for (unsigned iBit = 6; iBit < 32; iBit++)
1702 if (uEDX & RT_BIT(iBit))
1703 pHlp->pfnPrintf(pHlp, " %d", iBit);
1704 pHlp->pfnPrintf(pHlp, "\n");
1705 }
1706
1707 if (iVerbosity && cExtMax >= 8)
1708 {
1709 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[8].eax;
1710 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[8].ecx;
1711
1712 pHlp->pfnPrintf(pHlp,
1713 "Physical Address Width: %d bits\n"
1714 "Virtual Address Width: %d bits\n",
1715 (uEAX >> 0) & 0xff,
1716 (uEAX >> 8) & 0xff);
1717 pHlp->pfnPrintf(pHlp,
1718 "Physical Core Count: %d\n",
1719 (uECX >> 0) & 0xff);
1720 }
1721
1722
1723 /*
1724 * Centaur.
1725 */
1726 unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdCentaur[0].eax & 0xffff;
1727
1728 pHlp->pfnPrintf(pHlp,
1729 "\n"
1730 " RAW Centaur CPUIDs\n"
1731 " Function eax ebx ecx edx\n");
1732 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur); i++)
1733 {
1734 Guest = pVM->cpum.s.aGuestCpuIdCentaur[i];
1735 ASMCpuId(0xc0000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1736
1737 pHlp->pfnPrintf(pHlp,
1738 "Gst: %08x %08x %08x %08x %08x%s\n"
1739 "Hst: %08x %08x %08x %08x\n",
1740 0xc0000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
1741 i <= cCentaurMax ? "" : "*",
1742 Host.eax, Host.ebx, Host.ecx, Host.edx);
1743 }
1744
1745 /*
1746 * Understandable output
1747 */
1748 if (iVerbosity && cCentaurMax >= 0)
1749 {
1750 Guest = pVM->cpum.s.aGuestCpuIdCentaur[0];
1751 pHlp->pfnPrintf(pHlp,
1752 "Centaur Supports: 0xc0000000-%#010x\n",
1753 Guest.eax);
1754 }
1755
1756 if (iVerbosity && cCentaurMax >= 1)
1757 {
1758 ASMCpuId(0xc0000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1759 uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdExt[1].edx;
1760 uint32_t uEdxHst = Host.edx;
1761
1762 if (iVerbosity == 1)
1763 {
1764 pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
1765 if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
1766 if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
1767 if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
1768 if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
1769 if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
1770 if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
1771 if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
1772 if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
1773 /* possibly indicating MM/HE and MM/HE-E on older chips... */
1774 if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
1775 if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
1776 if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
1777 if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
1778 if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
1779 if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
1780 for (unsigned iBit = 14; iBit < 32; iBit++)
1781 if (uEdxGst & RT_BIT(iBit))
1782 pHlp->pfnPrintf(pHlp, " %d", iBit);
1783 pHlp->pfnPrintf(pHlp, "\n");
1784 }
1785 else
1786 {
1787 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
1788 pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
1789 pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
1790 pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
1791 pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
1792 pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
1793 pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
1794 pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
1795 pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
1796 /* possibly indicating MM/HE and MM/HE-E on older chips... */
1797 pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
1798 pHlp->pfnPrintf(pHlp, "ACE2-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
1799 pHlp->pfnPrintf(pHlp, "PHE - Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
1800 pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
1801 pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
1802 pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
1803 for (unsigned iBit = 14; iBit < 32; iBit++)
1804 if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
1805 pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
1806 pHlp->pfnPrintf(pHlp, "\n");
1807 }
1808 }
1809}
1810
1811
/**
 * Structure used when disassembling instructions in DBGF.
 * This is used so the reader function can get the stuff it needs:
 * a pointer to it is stashed in the disassembler state's user data
 * (see cpumR3DisasInstrRead) so the byte reader can translate
 * segment-relative addresses and cache the current page mapping.
 */
typedef struct CPUMDISASSTATE
{
    /** Pointer to the CPU structure. */
    PDISCPUSTATE pCpu;
    /** The VM handle. */
    PVM pVM;
    /** Pointer to the first byte in the segment. */
    RTGCUINTPTR GCPtrSegBase;
    /** Pointer to the byte after the end of the segment. (might have wrapped!) */
    RTGCUINTPTR GCPtrSegEnd;
    /** The size of the segment minus 1. */
    RTGCUINTPTR cbSegLimit;
    /** Pointer to the current page - HC Ptr. */
    void const *pvPageHC;
    /** Pointer to the current page - GC Ptr. */
    RTGCPTR pvPageGC;
    /** The lock information that PGMPhysReleasePageMappingLock needs. */
    PGMPAGEMAPLOCK PageMapLock;
    /** Whether the PageMapLock is valid or not (i.e. whether a mapping
     * acquired via PGMPhysGCPtr2CCPtrReadOnly must still be released). */
    bool fLocked;
    /** 64 bits mode or not. */
    bool f64Bits;
} CPUMDISASSTATE, *PCPUMDISASSTATE;
1839
1840
/**
 * Instruction reader callback for the disassembler.
 *
 * Reads guest bytes for DISInstr, translating the segment-relative source
 * address to a host mapping page by page and enforcing the segment limit
 * for non-64-bit code.
 *
 * @returns VBox status code.
 * @param   PtrSrc      Address to read from.
 *                      In our case this is relative to the selector pointed to by the 2nd user argument of uDisCpu.
 * @param   pu8Dst      Where to store the bytes.
 * @param   cbRead      Number of bytes to read.
 * @param   uDisCpu     Pointer to the disassembler cpu state.
 *                      In this context it's always pointer to the Core of a DBGFDISASSTATE.
 */
static DECLCALLBACK(int) cpumR3DisasInstrRead(RTUINTPTR PtrSrc, uint8_t *pu8Dst, unsigned cbRead, void *uDisCpu)
{
    PDISCPUSTATE pCpu = (PDISCPUSTATE)uDisCpu;
    /* The CPUMDISASSTATE was stashed in apvUserData[0] by CPUMR3DisasmInstrCPU. */
    PCPUMDISASSTATE pState = (PCPUMDISASSTATE)pCpu->apvUserData[0];
    Assert(cbRead > 0);
    for (;;)
    {
        /* Linear guest address = segment base + segment-relative source. */
        RTGCUINTPTR GCPtr = PtrSrc + pState->GCPtrSegBase;

        /* Need to update the page translation? */
        if (    !pState->pvPageHC
            ||  (GCPtr >> PAGE_SHIFT) != (pState->pvPageGC >> PAGE_SHIFT))
        {
            int rc = VINF_SUCCESS;

            /* translate the address */
            pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
            if (MMHyperIsInsideArea(pState->pVM, pState->pvPageGC))
            {
                /* Hypervisor area pages are mapped directly; no PGM lock needed. */
                pState->pvPageHC = MMHyperGC2HC(pState->pVM, pState->pvPageGC);
                if (!pState->pvPageHC)
                    rc = VERR_INVALID_POINTER;
            }
            else
            {
                /* Release mapping lock previously acquired. */
                if (pState->fLocked)
                    PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
                rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVM, pState->pvPageGC, &pState->pvPageHC, &pState->PageMapLock);
                /* fLocked tracks whether the mapping (and its lock) must be released later. */
                pState->fLocked = RT_SUCCESS_NP(rc);
            }
            if (VBOX_FAILURE(rc))
            {
                pState->pvPageHC = NULL;
                return rc;
            }
        }

        /* check the segment limit */
        if (!pState->f64Bits && PtrSrc > pState->cbSegLimit)
            return VERR_OUT_OF_SELECTOR_BOUNDS;

        /* calc how much we can read - at most to the end of the current page,
           and for non-64-bit code no further than the end of the segment. */
        uint32_t cb = PAGE_SIZE - (GCPtr & PAGE_OFFSET_MASK);
        if (!pState->f64Bits)
        {
            RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
            if (cb > cbSeg && cbSeg)
                cb = cbSeg;
        }
        if (cb > cbRead)
            cb = cbRead;

        /* read and advance */
        memcpy(pu8Dst, (char *)pState->pvPageHC + (GCPtr & PAGE_OFFSET_MASK), cb);
        cbRead -= cb;
        if (!cbRead)
            return VINF_SUCCESS;
        pu8Dst += cb;
        PtrSrc += cb;
    }
}
1914
1915
/**
 * Disassemble an instruction and return the information in the provided structure.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle
 * @param   pCtx        CPU context
 * @param   GCPtrPC     Program counter (relative to CS) to disassemble from.
 * @param   pCpu        Disassembly state
 * @param   pszPrefix   String prefix for logging (debug only)
 */
CPUMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PCPUMCTX pCtx, RTGCPTR GCPtrPC, PDISCPUSTATE pCpu, const char *pszPrefix)
{
    CPUMDISASSTATE State;
    int rc;

    const PGMMODE enmMode = PGMGetGuestMode(pVM);
    State.pCpu = pCpu;
    State.pvPageGC = 0;
    State.pvPageHC = NULL;
    State.pVM = pVM;
    State.fLocked = false;
    State.f64Bits = false;

    /*
     * Get selector information.
     */
    if (    (pCtx->cr0 & X86_CR0_PE)
        &&  pCtx->eflags.Bits.u1VM == 0)
    {
        /* Protected mode (not V86). */
        if (CPUMAreHiddenSelRegsValid(pVM))
        {
            /* Use the hidden CS register parts directly. */
            State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->csHid.Attr.n.u1Long;
            State.GCPtrSegBase = pCtx->csHid.u64Base;
            State.GCPtrSegEnd = pCtx->csHid.u32Limit + 1 + (RTGCUINTPTR)pCtx->csHid.u64Base;
            State.cbSegLimit = pCtx->csHid.u32Limit;
            pCpu->mode = (State.f64Bits)
                ? CPUMODE_64BIT
                : pCtx->csHid.Attr.n.u1DefBig
                ? CPUMODE_32BIT
                : CPUMODE_16BIT;
        }
        else
        {
            /* Hidden parts not valid - look the selector up via SELM. */
            SELMSELINFO SelInfo;

            rc = SELMR3GetShadowSelectorInfo(pVM, pCtx->cs, &SelInfo);
            if (!VBOX_SUCCESS(rc))
            {
                AssertMsgFailed(("SELMR3GetShadowSelectorInfo failed for %04X:%VGv rc=%d\n", pCtx->cs, GCPtrPC, rc));
                return rc;
            }

            /*
             * Validate the selector.
             */
            rc = SELMSelInfoValidateCS(&SelInfo, pCtx->ss);
            if (!VBOX_SUCCESS(rc))
            {
                AssertMsgFailed(("SELMSelInfoValidateCS failed for %04X:%VGv rc=%d\n", pCtx->cs, GCPtrPC, rc));
                return rc;
            }
            State.GCPtrSegBase = SelInfo.GCPtrBase;
            State.GCPtrSegEnd = SelInfo.cbLimit + 1 + (RTGCUINTPTR)SelInfo.GCPtrBase;
            State.cbSegLimit = SelInfo.cbLimit;
            pCpu->mode = SelInfo.Raw.Gen.u1DefBig ? CPUMODE_32BIT : CPUMODE_16BIT;
        }
    }
    else
    {
        /* real or V86 mode */
        pCpu->mode = CPUMODE_16BIT;
        State.GCPtrSegBase = pCtx->cs * 16;
        State.GCPtrSegEnd = 0xFFFFFFFF;
        State.cbSegLimit = 0xFFFFFFFF;
    }

    /*
     * Disassemble the instruction.
     */
    pCpu->pfnReadBytes = cpumR3DisasInstrRead;
    pCpu->apvUserData[0] = &State;  /* picked up again in cpumR3DisasInstrRead */

    uint32_t cbInstr;
#ifndef LOG_ENABLED
    rc = DISInstr(pCpu, GCPtrPC, 0, &cbInstr, NULL);
    if (VBOX_SUCCESS(rc))
    {
#else
    char szOutput[160];
    rc = DISInstr(pCpu, GCPtrPC, 0, &cbInstr, &szOutput[0]);
    if (VBOX_SUCCESS(rc))
    {
        /* log it */
        if (pszPrefix)
            Log(("%s: %s", pszPrefix, szOutput));
        else
            Log(("%s", szOutput));
#endif
        rc = VINF_SUCCESS;
    }
    else
        Log(("CPUMR3DisasmInstrCPU: DISInstr failed for %04X:%VGv rc=%Vrc\n", pCtx->cs, GCPtrPC, rc));

    /* Release mapping lock acquired in cpumR3DisasInstrRead. */
    if (State.fLocked)
        PGMPhysReleasePageMappingLock(pVM, &State.PageMapLock);

    return rc;
}
2026
2027#ifdef DEBUG
2028
2029/**
2030 * Disassemble an instruction and dump it to the log
2031 *
2032 * @returns VBox status code.
2033 * @param pVM VM Handle
2034 * @param pCtx CPU context
2035 * @param pc GC instruction pointer
2036 * @param prefix String prefix for logging
2037 * @deprecated Use DBGFR3DisasInstrCurrentLog().
2038 *
2039 */
2040CPUMR3DECL(void) CPUMR3DisasmInstr(PVM pVM, PCPUMCTX pCtx, RTGCPTR pc, char *prefix)
2041{
2042 DISCPUSTATE cpu;
2043
2044 CPUMR3DisasmInstrCPU(pVM, pCtx, pc, &cpu, prefix);
2045}
2046
2047/**
2048 * Disassemble an instruction and dump it to the log
2049 *
2050 * @returns VBox status code.
2051 * @param pVM VM Handle
2052 * @param pCtx CPU context
2053 * @param pc GC instruction pointer
2054 * @param prefix String prefix for logging
2055 * @param nrInstructions
2056 *
2057 */
2058CPUMR3DECL(void) CPUMR3DisasmBlock(PVM pVM, PCPUMCTX pCtx, RTGCPTR pc, char *prefix, int nrInstructions)
2059{
2060 for(int i=0;i<nrInstructions;i++)
2061 {
2062 DISCPUSTATE cpu;
2063
2064 CPUMR3DisasmInstrCPU(pVM, pCtx, pc, &cpu, prefix);
2065 pc += cpu.opsize;
2066 }
2067}
2068
2069#endif /* DEBUG */
2070
2071#ifdef DEBUG
/**
 * Debug helper - Saves guest context on raw mode entry (for fatal dump)
 *
 * Takes a snapshot of the current guest CPU context into GuestEntry so a
 * later fatal dump can show the state as it was when raw mode was entered.
 *
 * @internal
 */
CPUMR3DECL(void) CPUMR3SaveEntryCtx(PVM pVM)
{
    /* Plain struct copy; GuestEntry is only ever read by debug dump code. */
    pVM->cpum.s.GuestEntry = pVM->cpum.s.Guest;
}
2081#endif /* DEBUG */
2082
2083
2084/**
2085 * API for controlling a few of the CPU features found in CR4.
2086 *
2087 * Currently only X86_CR4_TSD is accepted as input.
2088 *
2089 * @returns VBox status code.
2090 *
2091 * @param pVM The VM handle.
2092 * @param fOr The CR4 OR mask.
2093 * @param fAnd The CR4 AND mask.
2094 */
2095CPUMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd)
2096{
2097 AssertMsgReturn(!(fOr & ~(X86_CR4_TSD)), ("%#x\n", fOr), VERR_INVALID_PARAMETER);
2098 AssertMsgReturn((fAnd & ~(X86_CR4_TSD)) == ~(X86_CR4_TSD), ("%#x\n", fAnd), VERR_INVALID_PARAMETER);
2099
2100 pVM->cpum.s.CR4.OrMask &= fAnd;
2101 pVM->cpum.s.CR4.OrMask |= fOr;
2102
2103 return VINF_SUCCESS;
2104}
2105
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette