VirtualBox

source: vbox/trunk/src/VBox/VMM/CPUM.cpp@11964

Last change on this file since 11964 was 11960, checked in by vboxsync, 16 years ago

No need to clear EFER on reset. Set the TR attribute field to X86_SEL_TYPE_SYS_386_TSS_BUSY.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 106.1 KB
1/* $Id: CPUM.cpp 11960 2008-09-02 07:53:22Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_cpum
23 * The CPU Monitor / Manager keeps track of all the CPU registers. It is
24 * also responsible for lazy FPU handling and some of the context loading
25 * in raw mode.
26 *
27 * There are three CPU contexts; the most important one is the guest one (GC).
28 * When running in raw-mode (RC) there is a special hyper context for the VMM
29 * that floats around inside the guest address space. When running in raw-mode
30 * or when using 64-bit guests on a 32-bit host, CPUM also maintains a host
31 * context for saving and restoring registers across world switches. The latter
32 * is done in cooperation with the world switcher (@see pg_vmm).
33 */
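/* For orientation, the three contexts described above live in the CPUM instance
 * data; a minimal sketch using only field names that appear later in this file:
 *
 *     PCPUMCTX     pGuest = &pVM->cpum.s.Guest;   // guest state: reset, saved state, info dumps
 *     PCPUMCTX     pHyper = &pVM->cpum.s.Hyper;   // raw-mode hypervisor context
 *     PCPUMHOSTCTX pHost  = &pVM->cpum.s.Host;    // host registers saved/restored around world switches
 */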
34
35/*******************************************************************************
36* Header Files *
37*******************************************************************************/
38#define LOG_GROUP LOG_GROUP_CPUM
39#include <VBox/cpum.h>
40#include <VBox/cpumdis.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/mm.h>
44#include <VBox/selm.h>
45#include <VBox/dbgf.h>
46#include <VBox/patm.h>
47#include <VBox/ssm.h>
48#include "CPUMInternal.h"
49#include <VBox/vm.h>
50
51#include <VBox/param.h>
52#include <VBox/dis.h>
53#include <VBox/err.h>
54#include <VBox/log.h>
55#include <iprt/assert.h>
56#include <iprt/asm.h>
57#include <iprt/string.h>
58#include <iprt/mp.h>
59#include <iprt/cpuset.h>
60
61
62/*******************************************************************************
63* Defined Constants And Macros *
64*******************************************************************************/
65/** The saved state version. */
66#define CPUM_SAVED_STATE_VERSION_VER1_6 6
67#define CPUM_SAVED_STATE_VERSION 8
68
69
70/*******************************************************************************
71* Structures and Typedefs *
72*******************************************************************************/
73
74/**
75 * What kind of cpu info dump to perform.
76 */
77typedef enum CPUMDUMPTYPE
78{
79 CPUMDUMPTYPE_TERSE,
80 CPUMDUMPTYPE_DEFAULT,
81 CPUMDUMPTYPE_VERBOSE
82
83} CPUMDUMPTYPE;
84/** Pointer to a cpu info dump type. */
85typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;
86
87/*******************************************************************************
88* Internal Functions *
89*******************************************************************************/
90static int cpumR3CpuIdInit(PVM pVM);
91static DECLCALLBACK(int) cpumR3Save(PVM pVM, PSSMHANDLE pSSM);
92static DECLCALLBACK(int) cpumR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
93static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
94static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
95static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
96static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
97static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
98static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
99
100
101/**
102 * Initializes the CPUM.
103 *
104 * @returns VBox status code.
105 * @param pVM The VM to operate on.
106 */
107CPUMR3DECL(int) CPUMR3Init(PVM pVM)
108{
109 LogFlow(("CPUMR3Init\n"));
110
111 /*
112 * Assert alignment and sizes.
113 */
114 AssertRelease(!(RT_OFFSETOF(VM, cpum.s) & 31));
115 AssertRelease(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
116
117 /*
118 * Setup any fixed pointers and offsets.
119 */
120 pVM->cpum.s.offVM = RT_OFFSETOF(VM, cpum);
121 pVM->cpum.s.pCPUMHC = &pVM->cpum.s;
122 pVM->cpum.s.pHyperCoreR3 = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
123 pVM->cpum.s.pHyperCoreR0 = VM_R0_ADDR(pVM, CPUMCTX2CORE(&pVM->cpum.s.Hyper));
124
125 /* Hidden selector registers are invalid by default. */
126 pVM->cpum.s.fValidHiddenSelRegs = false;
127
128 /*
129 * Check that the CPU supports the minimum features we require.
130 */
131 /** @todo check the contract! */
132 if (!ASMHasCpuId())
133 {
134 Log(("The CPU doesn't support CPUID!\n"));
135 return VERR_UNSUPPORTED_CPU;
136 }
137 ASMCpuId_ECX_EDX(1, &pVM->cpum.s.CPUFeatures.ecx, &pVM->cpum.s.CPUFeatures.edx);
138 ASMCpuId_ECX_EDX(0x80000001, &pVM->cpum.s.CPUFeaturesExt.ecx, &pVM->cpum.s.CPUFeaturesExt.edx);
139
140 /* Setup the CR4 AND and OR masks used in the switcher */
141 /* Depends on the presence of FXSAVE(SSE) support on the host CPU */
142 if (!pVM->cpum.s.CPUFeatures.edx.u1FXSR)
143 {
144 Log(("The CPU doesn't support FXSAVE/FXRSTOR!\n"));
145 /* No FXSAVE implies no SSE */
146 pVM->cpum.s.CR4.AndMask = X86_CR4_PVI | X86_CR4_VME;
147 pVM->cpum.s.CR4.OrMask = 0;
148 }
149 else
150 {
151 pVM->cpum.s.CR4.AndMask = X86_CR4_OSXMMEEXCPT | X86_CR4_PVI | X86_CR4_VME;
152 pVM->cpum.s.CR4.OrMask = X86_CR4_OSFSXR;
153 }
154
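/* A sketch of how these masks are presumably combined when the switcher builds
 * the CR4 value it loads (an assumption on our part; the actual use is in the
 * world switcher code, not in this file):
 *
 *     uint32_t uCr4 = (uGuestCr4 & pVM->cpum.s.CR4.AndMask) | pVM->cpum.s.CR4.OrMask;
 *
 * i.e. AndMask selects which guest-controllable bits may pass through, while
 * OrMask forces bits (OSFSXR when the host supports FXSAVE/FXRSTOR) regardless
 * of what the guest set.
 */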
155 if (!pVM->cpum.s.CPUFeatures.edx.u1MMX)
156 {
157 Log(("The CPU doesn't support MMX!\n"));
158 return VERR_UNSUPPORTED_CPU;
159 }
160 if (!pVM->cpum.s.CPUFeatures.edx.u1TSC)
161 {
162 Log(("The CPU doesn't support TSC!\n"));
163 return VERR_UNSUPPORTED_CPU;
164 }
165 /* Bogus on AMD? */
166 if (!pVM->cpum.s.CPUFeatures.edx.u1SEP)
167 Log(("The CPU doesn't support SYSENTER/SYSEXIT!\n"));
168
169 /*
170 * Setup hypervisor startup values.
171 */
172
173 /*
174 * Register saved state data item.
175 */
176 int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
177 NULL, cpumR3Save, NULL,
178 NULL, cpumR3Load, NULL);
179 if (VBOX_FAILURE(rc))
180 return rc;
181
182 /* Query the CPU manufacturer. */
183 uint32_t uEAX, uEBX, uECX, uEDX;
184 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
185 if ( uEAX >= 1
186 && uEBX == X86_CPUID_VENDOR_AMD_EBX
187 && uECX == X86_CPUID_VENDOR_AMD_ECX
188 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
189 pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_AMD;
190 else if ( uEAX >= 1
191 && uEBX == X86_CPUID_VENDOR_INTEL_EBX
192 && uECX == X86_CPUID_VENDOR_INTEL_ECX
193 && uEDX == X86_CPUID_VENDOR_INTEL_EDX)
194 pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_INTEL;
195 else /** @todo Via */
196 pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_UNKNOWN;
197
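/* For reference, CPUID(0) returns the vendor string split over EBX, EDX and ECX
 * in that order; a small sketch of reassembling it (the buffer name is ours):
 *
 *     char szVendor[13];
 *     memcpy(&szVendor[0], &uEBX, 4);    // "Auth" / "Genu"
 *     memcpy(&szVendor[4], &uEDX, 4);    // "enti" / "ineI"
 *     memcpy(&szVendor[8], &uECX, 4);    // "cAMD" / "ntel"
 *     szVendor[12] = '\0';
 *
 * which yields "AuthenticAMD" or "GenuineIntel" for the two vendors checked above.
 */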
198 /*
199 * Register info handlers.
200 */
201 DBGFR3InfoRegisterInternal(pVM, "cpum", "Displays all the cpu states.", &cpumR3InfoAll);
202 DBGFR3InfoRegisterInternal(pVM, "cpumguest", "Displays the guest cpu state.", &cpumR3InfoGuest);
203 DBGFR3InfoRegisterInternal(pVM, "cpumhyper", "Displays the hypervisor cpu state.", &cpumR3InfoHyper);
204 DBGFR3InfoRegisterInternal(pVM, "cpumhost", "Displays the host cpu state.", &cpumR3InfoHost);
205 DBGFR3InfoRegisterInternal(pVM, "cpuid", "Displays the guest cpuid leaves.", &cpumR3CpuIdInfo);
206 DBGFR3InfoRegisterInternal(pVM, "cpumguestinstr", "Displays the current guest instruction.", &cpumR3InfoGuestInstr);
207
208 /*
209 * Initialize the Guest CPU state.
210 */
211 rc = cpumR3CpuIdInit(pVM);
212 if (VBOX_FAILURE(rc))
213 return rc;
214 CPUMR3Reset(pVM);
215 return VINF_SUCCESS;
216}
217
218
219/**
220 * Initializes the emulated CPU's cpuid information.
221 *
222 * @returns VBox status code.
223 * @param pVM The VM to operate on.
224 */
225static int cpumR3CpuIdInit(PVM pVM)
226{
227 PCPUM pCPUM = &pVM->cpum.s;
228 uint32_t i;
229
230 /*
231 * Get the host CPUIDs.
232 */
233 for (i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd); i++)
234 ASMCpuId_Idx_ECX(i, 0,
235 &pCPUM->aGuestCpuIdStd[i].eax, &pCPUM->aGuestCpuIdStd[i].ebx,
236 &pCPUM->aGuestCpuIdStd[i].ecx, &pCPUM->aGuestCpuIdStd[i].edx);
237 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt); i++)
238 ASMCpuId(0x80000000 + i,
239 &pCPUM->aGuestCpuIdExt[i].eax, &pCPUM->aGuestCpuIdExt[i].ebx,
240 &pCPUM->aGuestCpuIdExt[i].ecx, &pCPUM->aGuestCpuIdExt[i].edx);
241 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
242 ASMCpuId(0xc0000000 + i,
243 &pCPUM->aGuestCpuIdCentaur[i].eax, &pCPUM->aGuestCpuIdCentaur[i].ebx,
244 &pCPUM->aGuestCpuIdCentaur[i].ecx, &pCPUM->aGuestCpuIdCentaur[i].edx);
245
246
247 /*
248 * Only report features we can support.
249 */
250 pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU
251 | X86_CPUID_FEATURE_EDX_VME
252 | X86_CPUID_FEATURE_EDX_DE
253 | X86_CPUID_FEATURE_EDX_PSE
254 | X86_CPUID_FEATURE_EDX_TSC
255 | X86_CPUID_FEATURE_EDX_MSR
256 //| X86_CPUID_FEATURE_EDX_PAE - not implemented yet.
257 | X86_CPUID_FEATURE_EDX_MCE
258 | X86_CPUID_FEATURE_EDX_CX8
259 //| X86_CPUID_FEATURE_EDX_APIC - set by the APIC device if present.
260 /** @note we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see #1757) */
261 //| X86_CPUID_FEATURE_EDX_SEP
262 | X86_CPUID_FEATURE_EDX_MTRR
263 | X86_CPUID_FEATURE_EDX_PGE
264 | X86_CPUID_FEATURE_EDX_MCA
265 | X86_CPUID_FEATURE_EDX_CMOV
266 | X86_CPUID_FEATURE_EDX_PAT
267 | X86_CPUID_FEATURE_EDX_PSE36
268 //| X86_CPUID_FEATURE_EDX_PSN - no serial number.
269 | X86_CPUID_FEATURE_EDX_CLFSH
270 //| X86_CPUID_FEATURE_EDX_DS - no debug store.
271 //| X86_CPUID_FEATURE_EDX_ACPI - not virtualized yet.
272 | X86_CPUID_FEATURE_EDX_MMX
273 | X86_CPUID_FEATURE_EDX_FXSR
274 | X86_CPUID_FEATURE_EDX_SSE
275 | X86_CPUID_FEATURE_EDX_SSE2
276 //| X86_CPUID_FEATURE_EDX_SS - no self snoop.
277 //| X86_CPUID_FEATURE_EDX_HTT - no hyperthreading.
278 //| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
279 //| X86_CPUID_FEATURE_EDX_PBE - no pending break enable.
280 | 0;
281 pCPUM->aGuestCpuIdStd[1].ecx &= 0//X86_CPUID_FEATURE_ECX_SSE3 - not supported by the recompiler yet.
282 | X86_CPUID_FEATURE_ECX_MONITOR
283 //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
284 //| X86_CPUID_FEATURE_ECX_VMX - not virtualized.
285 //| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
286 //| X86_CPUID_FEATURE_ECX_TM2 - no thermal monitor 2.
287 //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
288 /* ECX Bit 13 - CX16 - CMPXCHG16B. */
289 //| X86_CPUID_FEATURE_ECX_CX16
290 /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
291 //| X86_CPUID_FEATURE_ECX_TPRUPDATE
292 /* ECX Bit 23 - POPCOUNT instruction. */
293 //| X86_CPUID_FEATURE_ECX_POPCOUNT
294 | 0;
295
296 /* ASSUMES that this is ALWAYS the AMD define feature set if present. */
297 pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU
298 | X86_CPUID_AMD_FEATURE_EDX_VME
299 | X86_CPUID_AMD_FEATURE_EDX_DE
300 | X86_CPUID_AMD_FEATURE_EDX_PSE
301 | X86_CPUID_AMD_FEATURE_EDX_TSC
302 | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
303 //| X86_CPUID_AMD_FEATURE_EDX_PAE - not implemented yet.
304 //| X86_CPUID_AMD_FEATURE_EDX_MCE - not virtualized yet.
305 | X86_CPUID_AMD_FEATURE_EDX_CX8
306 //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
307 /** @note we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see #1757) */
308 //| X86_CPUID_AMD_FEATURE_EDX_SEP
309 | X86_CPUID_AMD_FEATURE_EDX_MTRR
310 | X86_CPUID_AMD_FEATURE_EDX_PGE
311 | X86_CPUID_AMD_FEATURE_EDX_MCA
312 | X86_CPUID_AMD_FEATURE_EDX_CMOV
313 | X86_CPUID_AMD_FEATURE_EDX_PAT
314 | X86_CPUID_AMD_FEATURE_EDX_PSE36
315 //| X86_CPUID_AMD_FEATURE_EDX_NX - not virtualized, requires PAE.
316 //| X86_CPUID_AMD_FEATURE_EDX_AXMMX
317 | X86_CPUID_AMD_FEATURE_EDX_MMX
318 | X86_CPUID_AMD_FEATURE_EDX_FXSR
319 | X86_CPUID_AMD_FEATURE_EDX_FFXSR
320 //| X86_CPUID_AMD_FEATURE_EDX_PAGE1GB
321 //| X86_CPUID_AMD_FEATURE_EDX_RDTSCP
322 //| X86_CPUID_AMD_FEATURE_EDX_LONG_MODE - not yet.
323 | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
324 | X86_CPUID_AMD_FEATURE_EDX_3DNOW
325 | 0;
326 pCPUM->aGuestCpuIdExt[1].ecx &= 0
327 //| X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF
328 //| X86_CPUID_AMD_FEATURE_ECX_CMPL
329 //| X86_CPUID_AMD_FEATURE_ECX_SVM - not virtualized.
330 //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
331 //| X86_CPUID_AMD_FEATURE_ECX_CR8L
332 //| X86_CPUID_AMD_FEATURE_ECX_ABM
333 //| X86_CPUID_AMD_FEATURE_ECX_SSE4A
334 //| X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
335 //| X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
336 //| X86_CPUID_AMD_FEATURE_ECX_OSVW
337 //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
338 //| X86_CPUID_AMD_FEATURE_ECX_WDT
339 | 0;
340
341 /*
342 * Hide HTT, multicore, SMP, whatever.
343 * (APIC-ID := 0 and #LogCpus := 0)
344 */
345 pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
346
347 /* Cpuid 2:
348 * Intel: Cache and TLB information
349 * AMD: Reserved
350 * Safe to expose
351 */
352
353 /* Cpuid 3:
354 * Intel: EAX, EBX - reserved
355 * ECX, EDX - Processor Serial Number if available, otherwise reserved
356 * AMD: Reserved
357 * Safe to expose
358 */
359 if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN))
360 pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0;
361
362 /* Cpuid 4:
363 * Intel: Deterministic Cache Parameters Leaf
364 * Note: Depends on the ECX input! -> Feeling rather lazy now, so we just return 0
365 * AMD: Reserved
366 * Safe to expose, except for EAX:
367 * Bits 25-14: Maximum number of threads sharing this cache in a physical package (see note)**
368 * Bits 31-26: Maximum number of processor cores in this physical package**
369 */
370 pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0;
371 pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0;
372
373 /* Cpuid 5: Monitor/mwait Leaf
374 * Intel: ECX, EDX - reserved
375 * EAX, EBX - Smallest and largest monitor line size
376 * AMD: EDX - reserved
377 * EAX, EBX - Smallest and largest monitor line size
378 * ECX - extensions (ignored for now)
379 * Safe to expose
380 */
381 if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR))
382 pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0;
383
384 pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
385
386 /*
387 * Determine the default.
388 *
389 * Intel returns values of the highest standard function, while AMD
390 * returns zeros. VIA on the other hand seems to return nothing or
391 * perhaps some random garbage; we don't try to duplicate this behavior.
392 */
393 ASMCpuId(pCPUM->aGuestCpuIdStd[0].eax + 10,
394 &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx,
395 &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx);
396
397 /* Cpuid 0x80000005 & 0x80000006 contain information about L1, L2 & L3 cache and TLB identifiers.
398 * Safe to pass on to the guest.
399 *
400 * Intel: 0x80000005 reserved
401 * 0x80000006 L2 cache information
402 * AMD: 0x80000005 L1 cache information
403 * 0x80000006 L2/L3 cache information
404 */
405
406 /* Cpuid 0x80000007:
407 * AMD: EAX, EBX, ECX - reserved
408 * EDX: Advanced Power Management Information
409 * Intel: Reserved
410 */
411 if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007))
412 {
413 Assert(pVM->cpum.s.enmCPUVendor != CPUMCPUVENDOR_INVALID);
414
415 pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0;
416
417 if (pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
418 {
419 /* Only expose the TSC invariant capability bit to the guest. */
420 pCPUM->aGuestCpuIdExt[7].edx &= 0
421 //| X86_CPUID_AMD_ADVPOWER_EDX_TS
422 //| X86_CPUID_AMD_ADVPOWER_EDX_FID
423 //| X86_CPUID_AMD_ADVPOWER_EDX_VID
424 //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
425 //| X86_CPUID_AMD_ADVPOWER_EDX_TM
426 //| X86_CPUID_AMD_ADVPOWER_EDX_STC
427 //| X86_CPUID_AMD_ADVPOWER_EDX_MC
428 //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
429 | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
430 | 0;
431 }
432 else
433 pCPUM->aGuestCpuIdExt[7].edx = 0;
434 }
435
436 /* Cpuid 0x80000008:
437 * AMD: EBX, EDX - reserved
438 * EAX: Virtual/Physical address Size
439 * ECX: Number of cores + APICIdCoreIdSize
440 * Intel: EAX: Virtual/Physical address Size
441 * EBX, ECX, EDX - reserved
442 */
443 if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
444 {
445 /* Only expose the virtual and physical address sizes to the guest. (EAX completely) */
446 pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */
447 /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu)
448 * NC (0-7) Number of cores; 0 equals 1 core */
449 pCPUM->aGuestCpuIdExt[8].ecx = 0;
450 }
451
452 /*
453 * Limit the number of entries and fill the remaining with the defaults.
454 *
455 * The limits mask off stuff about power saving and similar; this
456 * is perhaps a bit crudely done as there is probably some relatively harmless
457 * info too in these leaves (like words about having a constant TSC).
458 */
459#if 0
460 /** @todo NT4 installation regression - investigate */
461 if (pCPUM->aGuestCpuIdStd[0].eax > 5)
462 pCPUM->aGuestCpuIdStd[0].eax = 5;
463#else
464 if (pCPUM->aGuestCpuIdStd[0].eax > 2)
465 pCPUM->aGuestCpuIdStd[0].eax = 2;
466#endif
467 for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++)
468 pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef;
469
470 if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008))
471 pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008);
472 for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000)
473 ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1
474 : 0;
475 i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt); i++)
476 pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef;
477
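/* With the std/ext/Centaur arrays trimmed and padded like this, a guest CPUID
 * query can be served with a simple range dispatch. A simplified sketch, given
 * a leaf number iLeaf (the real lookup lives elsewhere, in CPUMGetGuestCpuId):
 *
 *     const CPUMCPUID *pLeaf;
 *     if (iLeaf < RT_ELEMENTS(pCPUM->aGuestCpuIdStd))
 *         pLeaf = &pCPUM->aGuestCpuIdStd[iLeaf];
 *     else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pCPUM->aGuestCpuIdExt))
 *         pLeaf = &pCPUM->aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
 *     else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur))
 *         pLeaf = &pCPUM->aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
 *     else
 *         pLeaf = &pCPUM->GuestCpuIdDef;
 */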
478 /*
479 * Workaround for missing cpuid(0) patches: If we fail to patch a cpuid(0).eax then
480 * Linux tries to determine the number of processors from (cpuid(4).eax >> 26) + 1.
481 * We currently don't support more than 1 processor.
482 */
483 pCPUM->aGuestCpuIdStd[4].eax = 0;
484
485 /*
486 * Centaur stuff (VIA).
487 *
488 * The important part here (we think) is to make sure the 0xc0000000
489 * function returns 0xc0000001. As for the features, we don't currently
490 * let on about any of those... 0xc0000002 seems to be some
491 * temperature/hz/++ stuff, include it as well (static).
492 */
493 if ( pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000)
494 && pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004))
495 {
496 pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002));
497 pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */
498 for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000);
499 i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
500 i++)
501 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
502 }
503 else
504 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
505 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
506
507
508 /*
509 * Load CPUID overrides from configuration.
510 */
511 PCPUMCPUID pCpuId = &pCPUM->aGuestCpuIdStd[0];
512 uint32_t cElements = RT_ELEMENTS(pCPUM->aGuestCpuIdStd);
513 for (i=0;; )
514 {
515 while (cElements-- > 0)
516 {
517 PCFGMNODE pNode = CFGMR3GetChildF(CFGMR3GetRoot(pVM), "CPUM/CPUID/%RX32", i);
518 if (pNode)
519 {
520 uint32_t u32;
521 int rc = CFGMR3QueryU32(pNode, "eax", &u32);
522 if (VBOX_SUCCESS(rc))
523 pCpuId->eax = u32;
524 else
525 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
526
527 rc = CFGMR3QueryU32(pNode, "ebx", &u32);
528 if (VBOX_SUCCESS(rc))
529 pCpuId->ebx = u32;
530 else
531 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
532
533 rc = CFGMR3QueryU32(pNode, "ecx", &u32);
534 if (VBOX_SUCCESS(rc))
535 pCpuId->ecx = u32;
536 else
537 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
538
539 rc = CFGMR3QueryU32(pNode, "edx", &u32);
540 if (VBOX_SUCCESS(rc))
541 pCpuId->edx = u32;
542 else
543 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
544 }
545 pCpuId++;
546 i++;
547 }
548
549 /* next */
550 if ((i & UINT32_C(0xc0000000)) == 0)
551 {
552 pCpuId = &pCPUM->aGuestCpuIdExt[0];
553 cElements = RT_ELEMENTS(pCPUM->aGuestCpuIdExt);
554 i = UINT32_C(0x80000000);
555 }
556 else if ((i & UINT32_C(0xc0000000)) == UINT32_C(0x80000000))
557 {
558 pCpuId = &pCPUM->aGuestCpuIdCentaur[0];
559 cElements = RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
560 i = UINT32_C(0xc0000000);
561 }
562 else
563 break;
564 }
565
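/* The override tree consumed by the loop above has this shape (leaf number in
 * hex as produced by %RX32, each register value optional):
 *
 *     CPUM/CPUID/1/eax         = <uint32_t>
 *     CPUM/CPUID/1/edx         = <uint32_t>
 *     CPUM/CPUID/80000001/ecx  = <uint32_t>
 *
 * A minimal sketch of installing one such override before CPUMR3Init runs,
 * assuming the generic CFGM insert APIs and that the nodes do not exist yet;
 * the leaf and value below are examples only:
 *
 *     PCFGMNODE pCpum, pCpuId, pLeaf;
 *     CFGMR3InsertNode(CFGMR3GetRoot(pVM), "CPUM",  &pCpum);
 *     CFGMR3InsertNode(pCpum,              "CPUID", &pCpuId);
 *     CFGMR3InsertNode(pCpuId,             "1",     &pLeaf);
 *     CFGMR3InsertInteger(pLeaf,           "edx",   UINT32_C(0x078bfbff));
 */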
566 /* Check if PAE was explicitly enabled by the user. */
567 bool fEnable = false;
568 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable);
569 if (VBOX_SUCCESS(rc) && fEnable)
570 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
571
572 /*
573 * Log the cpuid and we're good.
574 */
575 RTCPUSET OnlineSet;
576 LogRel(("Logical host processors: %d, processor active mask: %016RX64\n",
577 (int)RTMpGetCount(), RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
578 LogRel(("************************* CPUID dump ************************\n"));
579 DBGFR3Info(pVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
580 LogRel(("\n"));
581 DBGFR3InfoLog(pVM, "cpuid", "verbose"); /* macro */
582 LogRel(("******************** End of CPUID dump **********************\n"));
583 return VINF_SUCCESS;
584}
585
586
587
588
589/**
590 * Applies relocations to data and code managed by this
591 * component. This function will be called at init and
592 * whenever the VMM needs to relocate itself inside the GC.
593 *
594 * The CPUM will update the addresses used by the switcher.
595 *
596 * @param pVM The VM.
597 */
598CPUMR3DECL(void) CPUMR3Relocate(PVM pVM)
599{
600 LogFlow(("CPUMR3Relocate\n"));
601 /*
602 * Switcher pointers.
603 */
604 pVM->cpum.s.pCPUMGC = VM_GUEST_ADDR(pVM, &pVM->cpum.s);
605 pVM->cpum.s.pHyperCoreGC = MMHyperCCToRC(pVM, pVM->cpum.s.pHyperCoreR3);
606 Assert(pVM->cpum.s.pHyperCoreGC != NIL_RTGCPTR);
607}
608
609
610/**
611 * Queries the pointer to the internal CPUMCTX structure
612 *
613 * @returns VBox status code.
614 * @param pVM Handle to the virtual machine.
615 * @param ppCtx Receives the CPUMCTX GC pointer when successful.
616 */
617CPUMR3DECL(int) CPUMR3QueryGuestCtxGCPtr(PVM pVM, RCPTRTYPE(PCPUMCTX) *ppCtx)
618{
619 LogFlow(("CPUMR3QueryGuestCtxGCPtr\n"));
620 /*
621 * Store the address. (Later we might check who's calling, thus the RC.)
622 */
623 *ppCtx = VM_GUEST_ADDR(pVM, &pVM->cpum.s.Guest);
624 return VINF_SUCCESS;
625}
626
627
628/**
629 * Terminates the CPUM.
630 *
631 * Termination means cleaning up and freeing all resources;
632 * the VM itself is at this point powered off or suspended.
633 *
634 * @returns VBox status code.
635 * @param pVM The VM to operate on.
636 */
637CPUMR3DECL(int) CPUMR3Term(PVM pVM)
638{
639 /** @todo ? */
640 return 0;
641}
642
643
644/**
645 * Resets the CPU.
646 *
647 * @returns VINF_SUCCESS.
648 * @param pVM The VM handle.
649 */
650CPUMR3DECL(void) CPUMR3Reset(PVM pVM)
651{
652 PCPUMCTX pCtx = &pVM->cpum.s.Guest;
653
654 /*
655 * Initialize everything to ZERO first.
656 */
657 uint32_t fUseFlags = pVM->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM;
658 memset(pCtx, 0, sizeof(*pCtx));
659 pVM->cpum.s.fUseFlags = fUseFlags;
660
661 pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET; //0x60000010
662 pCtx->eip = 0x0000fff0;
663 pCtx->edx = 0x00000600; /* P6 processor */
664 pCtx->eflags.Bits.u1Reserved0 = 1;
665
666 pCtx->cs = 0xf000;
667 pCtx->csHid.u64Base = UINT64_C(0xffff0000);
668 pCtx->csHid.u32Limit = 0x0000ffff;
669 pCtx->csHid.Attr.n.u1DescType = 1; /* code/data segment */
670 pCtx->csHid.Attr.n.u1Present = 1;
671 pCtx->csHid.Attr.n.u4Type = X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
672
673 pCtx->dsHid.u32Limit = 0x0000ffff;
674 pCtx->dsHid.Attr.n.u1DescType = 1; /* code/data segment */
675 pCtx->dsHid.Attr.n.u1Present = 1;
676 pCtx->dsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
677
678 pCtx->esHid.u32Limit = 0x0000ffff;
679 pCtx->esHid.Attr.n.u1DescType = 1; /* code/data segment */
680 pCtx->esHid.Attr.n.u1Present = 1;
681 pCtx->esHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
682
683 pCtx->fsHid.u32Limit = 0x0000ffff;
684 pCtx->fsHid.Attr.n.u1DescType = 1; /* code/data segment */
685 pCtx->fsHid.Attr.n.u1Present = 1;
686 pCtx->fsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
687
688 pCtx->gsHid.u32Limit = 0x0000ffff;
689 pCtx->gsHid.Attr.n.u1DescType = 1; /* code/data segment */
690 pCtx->gsHid.Attr.n.u1Present = 1;
691 pCtx->gsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
692
693 pCtx->ssHid.u32Limit = 0x0000ffff;
694 pCtx->ssHid.Attr.n.u1Present = 1;
695 pCtx->ssHid.Attr.n.u1DescType = 1; /* code/data segment */
696 pCtx->ssHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
697
698 pCtx->idtr.cbIdt = 0xffff;
699 pCtx->gdtr.cbGdt = 0xffff;
700
701 pCtx->ldtrHid.u32Limit = 0xffff;
702 pCtx->ldtrHid.Attr.n.u1Present = 1;
703 pCtx->ldtrHid.Attr.n.u4Type = X86_SEL_TYPE_SYS_LDT;
704
705 pCtx->trHid.u32Limit = 0xffff;
706 pCtx->trHid.Attr.n.u1Present = 1;
707 pCtx->trHid.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
708
709 pCtx->dr6 = UINT32_C(0xFFFF0FF0);
710 pCtx->dr7 = 0x400;
711
712 pCtx->fpu.FTW = 0xff; /* All tags are set, i.e. the regs are empty. */
713 pCtx->fpu.FCW = 0x37f;
714
715 /* Init PAT MSR */
716 pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */
717}
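/* Quick sanity check on the values above: with the hidden CS base of 0xffff0000
 * and EIP of 0x0000fff0, the first fetch after reset lands at the traditional
 * reset vector, 16 bytes below 4GB:
 *
 *     0xffff0000 + 0x0000fff0 = 0xfffffff0
 */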
718
719
720/**
721 * Execute state save operation.
722 *
723 * @returns VBox status code.
724 * @param pVM VM Handle.
725 * @param pSSM SSM operation handle.
726 */
727static DECLCALLBACK(int) cpumR3Save(PVM pVM, PSSMHANDLE pSSM)
728{
729 /* Set the size of RTGCPTR for use of SSMR3Get/PutGCPtr. */
730 SSMR3SetGCPtrSize(pSSM, sizeof(RTGCPTR));
731
732 /*
733 * Save.
734 */
735 SSMR3PutMem(pSSM, &pVM->cpum.s.Hyper, sizeof(pVM->cpum.s.Hyper));
736 SSMR3PutMem(pSSM, &pVM->cpum.s.Guest, sizeof(pVM->cpum.s.Guest));
737 SSMR3PutU32(pSSM, pVM->cpum.s.fUseFlags);
738 SSMR3PutU32(pSSM, pVM->cpum.s.fChanged);
739
740 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd));
741 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));
742
743 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt));
744 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
745
746 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur));
747 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
748
749 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
750
751 /* Add the cpuid for checking that the cpu is unchanged. */
752 uint32_t au32CpuId[8] = {0};
753 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
754 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
755 return SSMR3PutMem(pSSM, &au32CpuId[0], sizeof(au32CpuId));
756}
757
758/**
759 * Load a version 1.6 CPUMCTX structure.
760 *
761 * @returns VBox status code.
762 * @param pVM VM Handle.
763 * @param pCpumctx16 Version 1.6 CPUMCTX
764 */
765static void cpumR3LoadCPUM1_6(PVM pVM, CPUMCTX_VER1_6 *pCpumctx16)
766{
767#define CPUMCTX16_LOADREG(regname) pVM->cpum.s.Guest.regname = pCpumctx16->regname;
768
769#define CPUMCTX16_LOADHIDREG(regname) \
770 pVM->cpum.s.Guest.regname##Hid.u64Base = pCpumctx16->regname##Hid.u32Base; \
771 pVM->cpum.s.Guest.regname##Hid.u32Limit = pCpumctx16->regname##Hid.u32Limit; \
772 pVM->cpum.s.Guest.regname##Hid.Attr = pCpumctx16->regname##Hid.Attr;
773
774#define CPUMCTX16_LOADSEGREG(regname) \
775 pVM->cpum.s.Guest.regname = pCpumctx16->regname; \
776 CPUMCTX16_LOADHIDREG(regname);
777
778 pVM->cpum.s.Guest.fpu = pCpumctx16->fpu;
779
780 CPUMCTX16_LOADREG(rax);
781 CPUMCTX16_LOADREG(rbx);
782 CPUMCTX16_LOADREG(rcx);
783 CPUMCTX16_LOADREG(rdx);
784 CPUMCTX16_LOADREG(rdi);
785 CPUMCTX16_LOADREG(rsi);
786 CPUMCTX16_LOADREG(rbp);
787 CPUMCTX16_LOADREG(esp);
788 CPUMCTX16_LOADREG(rip);
789 CPUMCTX16_LOADREG(rflags);
790
791 CPUMCTX16_LOADSEGREG(cs);
792 CPUMCTX16_LOADSEGREG(ds);
793 CPUMCTX16_LOADSEGREG(es);
794 CPUMCTX16_LOADSEGREG(fs);
795 CPUMCTX16_LOADSEGREG(gs);
796 CPUMCTX16_LOADSEGREG(ss);
797
798 CPUMCTX16_LOADREG(r8);
799 CPUMCTX16_LOADREG(r9);
800 CPUMCTX16_LOADREG(r10);
801 CPUMCTX16_LOADREG(r11);
802 CPUMCTX16_LOADREG(r12);
803 CPUMCTX16_LOADREG(r13);
804 CPUMCTX16_LOADREG(r14);
805 CPUMCTX16_LOADREG(r15);
806
807 CPUMCTX16_LOADREG(cr0);
808 CPUMCTX16_LOADREG(cr2);
809 CPUMCTX16_LOADREG(cr3);
810 CPUMCTX16_LOADREG(cr4);
811
812 CPUMCTX16_LOADREG(dr0);
813 CPUMCTX16_LOADREG(dr1);
814 CPUMCTX16_LOADREG(dr2);
815 CPUMCTX16_LOADREG(dr3);
816 CPUMCTX16_LOADREG(dr4);
817 CPUMCTX16_LOADREG(dr5);
818 CPUMCTX16_LOADREG(dr6);
819 CPUMCTX16_LOADREG(dr7);
820
821 pVM->cpum.s.Guest.gdtr.cbGdt = pCpumctx16->gdtr.cbGdt;
822 pVM->cpum.s.Guest.gdtr.pGdt = pCpumctx16->gdtr.pGdt;
823 pVM->cpum.s.Guest.idtr.cbIdt = pCpumctx16->idtr.cbIdt;
824 pVM->cpum.s.Guest.idtr.pIdt = pCpumctx16->idtr.pIdt;
825
826 CPUMCTX16_LOADREG(ldtr);
827 CPUMCTX16_LOADREG(tr);
828
829 pVM->cpum.s.Guest.SysEnter = pCpumctx16->SysEnter;
830
831 CPUMCTX16_LOADREG(msrEFER);
832 CPUMCTX16_LOADREG(msrSTAR);
833 CPUMCTX16_LOADREG(msrPAT);
834 CPUMCTX16_LOADREG(msrLSTAR);
835 CPUMCTX16_LOADREG(msrCSTAR);
836 CPUMCTX16_LOADREG(msrSFMASK);
837 CPUMCTX16_LOADREG(msrKERNELGSBASE);
838
839 CPUMCTX16_LOADHIDREG(ldtr);
840 CPUMCTX16_LOADHIDREG(tr);
841
842#undef CPUMCTX16_LOADHIDREG
843#undef CPUMCTX16_LOADSEGREG
844#undef CPUMCTX16_LOADREG
845}
846
847/**
848 * Execute state load operation.
849 *
850 * @returns VBox status code.
851 * @param pVM VM Handle.
852 * @param pSSM SSM operation handle.
853 * @param u32Version Data layout version.
854 */
855static DECLCALLBACK(int) cpumR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
856{
857 /*
858 * Validate version.
859 */
860 if ( u32Version != CPUM_SAVED_STATE_VERSION
861 && u32Version != CPUM_SAVED_STATE_VERSION_VER1_6)
862 {
863 AssertMsgFailed(("cpuR3Load: Invalid version u32Version=%d!\n", u32Version));
864 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
865 }
866
867 /* Set the size of RTGCPTR for SSMR3GetGCPtr. */
868 if (u32Version == CPUM_SAVED_STATE_VERSION_VER1_6)
869 SSMR3SetGCPtrSize(pSSM, sizeof(RTGCPTR32));
870 else
871 SSMR3SetGCPtrSize(pSSM, sizeof(RTGCPTR));
872
873 /*
874 * Restore.
875 */
876 uint32_t uCR3 = pVM->cpum.s.Hyper.cr3;
877 uint32_t uESP = pVM->cpum.s.Hyper.esp; /* see VMMR3Relocate(). */
878 SSMR3GetMem(pSSM, &pVM->cpum.s.Hyper, sizeof(pVM->cpum.s.Hyper));
879 pVM->cpum.s.Hyper.cr3 = uCR3;
880 pVM->cpum.s.Hyper.esp = uESP;
881 if (u32Version == CPUM_SAVED_STATE_VERSION_VER1_6)
882 {
883 CPUMCTX_VER1_6 cpumctx16;
884 memset(&pVM->cpum.s.Guest, 0, sizeof(pVM->cpum.s.Guest));
885 SSMR3GetMem(pSSM, &cpumctx16, sizeof(cpumctx16));
886
887 /* Convert the old cpumctx format into the new one. */
888 cpumR3LoadCPUM1_6(pVM, &cpumctx16);
889 }
890 else
891 SSMR3GetMem(pSSM, &pVM->cpum.s.Guest, sizeof(pVM->cpum.s.Guest));
892
893 SSMR3GetU32(pSSM, &pVM->cpum.s.fUseFlags);
894 SSMR3GetU32(pSSM, &pVM->cpum.s.fChanged);
895
896 uint32_t cElements;
897 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
898 /* Support old saved states with a smaller standard cpuid array. */
899 if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
900 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
901 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdStd[0]));
902
903 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
904 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
905 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
906 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
907
908 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
909 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
910 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
911 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
912
913 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
914
915 /*
916 * Check that the basic cpuid id information is unchanged.
917 */
918 uint32_t au32CpuId[8] = {0};
919 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
920 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
921 uint32_t au32CpuIdSaved[8];
922 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
923 if (VBOX_SUCCESS(rc))
924 {
925 /* Ignore APIC ID (AMD specs). */
926 au32CpuId[5] &= ~0xff000000;
927 au32CpuIdSaved[5] &= ~0xff000000;
928 /* Ignore the number of Logical CPUs (AMD specs). */
929 au32CpuId[5] &= ~0x00ff0000;
930 au32CpuIdSaved[5] &= ~0x00ff0000;
931
932 /* do the compare */
933 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
934 {
935 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
936 LogRel(("cpumR3Load: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
937 "Saved=%.*Vhxs\n"
938 "Real =%.*Vhxs\n",
939 sizeof(au32CpuIdSaved), au32CpuIdSaved,
940 sizeof(au32CpuId), au32CpuId));
941 else
942 {
943 LogRel(("cpumR3Load: CpuId mismatch!\n"
944 "Saved=%.*Vhxs\n"
945 "Real =%.*Vhxs\n",
946 sizeof(au32CpuIdSaved), au32CpuIdSaved,
947 sizeof(au32CpuId), au32CpuId));
948 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
949 }
950 }
951 }
952
953 return rc;
954}
955
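/* For reference on the masking in the compare above: au32CpuId[5] is leaf 1 EBX,
 * whose upper two bytes are host specific and therefore excluded from the check:
 *
 *     uint32_t const fIgnoredLeaf1Ebx = UINT32_C(0xff000000)   // bits 31-24: initial APIC ID
 *                                     | UINT32_C(0x00ff0000);  // bits 23-16: logical CPU count
 *
 * The lower half (CLFLUSH line size and brand index) is still compared.
 */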
956
957/**
958 * Formats the EFLAGS value into mnemonics.
959 *
960 * @param pszEFlags Where to write the mnemonics. (Assumes sufficient buffer space.)
961 * @param efl The EFLAGS value.
962 */
963static void cpumR3InfoFormatFlags(char *pszEFlags, uint32_t efl)
964{
965 /*
966 * Format the flags.
967 */
968 static struct
969 {
970 const char *pszSet; const char *pszClear; uint32_t fFlag;
971 } s_aFlags[] =
972 {
973 { "vip",NULL, X86_EFL_VIP },
974 { "vif",NULL, X86_EFL_VIF },
975 { "ac", NULL, X86_EFL_AC },
976 { "vm", NULL, X86_EFL_VM },
977 { "rf", NULL, X86_EFL_RF },
978 { "nt", NULL, X86_EFL_NT },
979 { "ov", "nv", X86_EFL_OF },
980 { "dn", "up", X86_EFL_DF },
981 { "ei", "di", X86_EFL_IF },
982 { "tf", NULL, X86_EFL_TF },
983 { "ng", "pl", X86_EFL_SF },
984 { "nz", "zr", X86_EFL_ZF },
985 { "ac", "na", X86_EFL_AF },
986 { "po", "pe", X86_EFL_PF },
987 { "cy", "nc", X86_EFL_CF },
988 };
989 char *psz = pszEFlags;
990 for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
991 {
992 const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
993 if (pszAdd)
994 {
995 strcpy(psz, pszAdd);
996 psz += strlen(pszAdd);
997 *psz++ = ' ';
998 }
999 }
1000 psz[-1] = '\0';
1001}
1002
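/* Usage sketch, mirroring what cpumR3InfoOne() below does: the caller provides
 * the buffer, and 80 bytes is comfortably enough for all mnemonics plus spaces.
 *
 *     char szEFlags[80];
 *     cpumR3InfoFormatFlags(szEFlags, pVM->cpum.s.Guest.eflags.u32);
 *     Log(("guest eflags: %s\n", szEFlags));
 */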
1003
1004/**
1005 * Formats a full register dump.
1006 *
1007 * @param pVM VM Handle.
1008 * @param pCtx The context to format.
1009 * @param pCtxCore The context core to format.
1010 * @param pHlp Output functions.
1011 * @param enmType The dump type.
1012 * @param pszPrefix Register name prefix.
1013 */
1014static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCCPUMCTXCORE pCtxCore, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType, const char *pszPrefix)
1015{
1016 /*
1017 * Format the EFLAGS.
1018 */
1019 uint32_t efl = pCtxCore->eflags.u32;
1020 char szEFlags[80];
1021 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1022
1023 /*
1024 * Format the registers.
1025 */
1026 switch (enmType)
1027 {
1028 case CPUMDUMPTYPE_TERSE:
1029 if (CPUMIsGuestIn64BitCode(pVM, pCtxCore))
1030 {
1031 pHlp->pfnPrintf(pHlp,
1032 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1033 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1034 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1035 "%sr14=%016RX64 %sr15=%016RX64\n"
1036 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1037 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
1038 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1039 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1040 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1041 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1042 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1043 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, efl);
1044 }
1045 else
1046 pHlp->pfnPrintf(pHlp,
1047 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1048 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1049 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
1050 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1051 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1052 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1053 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, efl);
1054 break;
1055
1056 case CPUMDUMPTYPE_DEFAULT:
1057 if (CPUMIsGuestIn64BitCode(pVM, pCtxCore))
1058 {
1059 pHlp->pfnPrintf(pHlp,
1060 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1061 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1062 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1063 "%sr14=%016RX64 %sr15=%016RX64\n"
1064 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1065 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
1066 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%VGv:%04x %sldtr=%04x\n"
1067 ,
1068 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1069 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1070 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1071 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1072 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1073 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, (RTSEL)pCtx->tr, pszPrefix, efl,
1074 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1075 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, (RTSEL)pCtx->ldtr);
1076 }
1077 else
1078 pHlp->pfnPrintf(pHlp,
1079 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1080 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1081 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
1082 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%VGv:%04x %sldtr=%04x\n"
1083 ,
1084 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1085 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1086 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1087 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, (RTSEL)pCtx->tr, pszPrefix, efl,
1088 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1089 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, (RTSEL)pCtx->ldtr);
1090 break;
1091
1092 case CPUMDUMPTYPE_VERBOSE:
1093 if (CPUMIsGuestIn64BitCode(pVM, pCtxCore))
1094 {
1095 pHlp->pfnPrintf(pHlp,
1096 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1097 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1098 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1099 "%sr14=%016RX64 %sr15=%016RX64\n"
1100 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1101 "%scs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1102 "%sds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1103 "%ses={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1104 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1105 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1106 "%sss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1107 "%scr0=%016RX64 %scr2=%016RX64 %scr3=%016RX64 %scr4=%016RX64\n"
1108 "%sdr0=%016RX64 %sdr1=%016RX64 %sdr2=%016RX64 %sdr3=%016RX64\n"
1109 "%sdr4=%016RX64 %sdr5=%016RX64 %sdr6=%016RX64 %sdr7=%016RX64\n"
1110 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
1111 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1112 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1113 "%sSysEnter={cs=%04llx eip=%016RX64 esp=%016RX64}\n"
1114 ,
1115 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1116 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1117 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1118 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1119 pszPrefix, (RTSEL)pCtxCore->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u,
1120 pszPrefix, (RTSEL)pCtxCore->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u,
1121 pszPrefix, (RTSEL)pCtxCore->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u,
1122 pszPrefix, (RTSEL)pCtxCore->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u,
1123 pszPrefix, (RTSEL)pCtxCore->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u,
1124 pszPrefix, (RTSEL)pCtxCore->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u,
1125 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1126 pszPrefix, pCtx->dr0, pszPrefix, pCtx->dr1, pszPrefix, pCtx->dr2, pszPrefix, pCtx->dr3,
1127 pszPrefix, pCtx->dr4, pszPrefix, pCtx->dr5, pszPrefix, pCtx->dr6, pszPrefix, pCtx->dr7,
1128 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1129 pszPrefix, (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1130 pszPrefix, (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1131 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1132 }
1133 else
1134 pHlp->pfnPrintf(pHlp,
1135 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1136 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1137 "%scs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr0=%08RX64 %sdr1=%08RX64\n"
1138 "%sds={%04x base=%016RX64 limit=%08x flags=%08x} %sdr2=%08RX64 %sdr3=%08RX64\n"
1139 "%ses={%04x base=%016RX64 limit=%08x flags=%08x} %sdr4=%08RX64 %sdr5=%08RX64\n"
1140 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr6=%08RX64 %sdr7=%08RX64\n"
1141 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x} %scr0=%08RX64 %scr2=%08RX64\n"
1142 "%sss={%04x base=%016RX64 limit=%08x flags=%08x} %scr3=%08RX64 %scr4=%08RX64\n"
1143 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
1144 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1145 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1146 "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1147 ,
1148 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1149 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1150 pszPrefix, (RTSEL)pCtxCore->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pszPrefix, pCtx->dr0, pszPrefix, pCtx->dr1,
1151 pszPrefix, (RTSEL)pCtxCore->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pszPrefix, pCtx->dr2, pszPrefix, pCtx->dr3,
1152 pszPrefix, (RTSEL)pCtxCore->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pszPrefix, pCtx->dr4, pszPrefix, pCtx->dr5,
1153 pszPrefix, (RTSEL)pCtxCore->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pszPrefix, pCtx->dr6, pszPrefix, pCtx->dr7,
1154 pszPrefix, (RTSEL)pCtxCore->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2,
1155 pszPrefix, (RTSEL)pCtxCore->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1156 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1157 pszPrefix, (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1158 pszPrefix, (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1159 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1160
1161 pHlp->pfnPrintf(pHlp,
1162 "FPU:\n"
1163 "%sFCW=%04x %sFSW=%04x %sFTW=%02x\n"
1164 "%sres1=%02x %sFOP=%04x %sFPUIP=%08x %sCS=%04x %sRsvrd1=%04x\n"
1165 "%sFPUDP=%04x %sDS=%04x %sRsvrd2=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n"
1166 ,
1167 pszPrefix, pCtx->fpu.FCW, pszPrefix, pCtx->fpu.FSW, pszPrefix, pCtx->fpu.FTW,
1168 pszPrefix, pCtx->fpu.huh1, pszPrefix, pCtx->fpu.FOP, pszPrefix, pCtx->fpu.FPUIP, pszPrefix, pCtx->fpu.CS, pszPrefix, pCtx->fpu.Rsvrd1,
1169 pszPrefix, pCtx->fpu.FPUDP, pszPrefix, pCtx->fpu.DS, pszPrefix, pCtx->fpu.Rsrvd2,
1170 pszPrefix, pCtx->fpu.MXCSR, pszPrefix, pCtx->fpu.MXCSR_MASK);
1171
1172
1173 pHlp->pfnPrintf(pHlp,
1174 "MSR:\n"
1175 "%sEFER =%016RX64\n"
1176 "%sPAT =%016RX64\n"
1177 "%sSTAR =%016RX64\n"
1178 "%sCSTAR =%016RX64\n"
1179 "%sLSTAR =%016RX64\n"
1180 "%sSFMASK =%016RX64\n"
1181 "%sKERNELGSBASE =%016RX64\n",
1182 pszPrefix, pCtx->msrEFER,
1183 pszPrefix, pCtx->msrPAT,
1184 pszPrefix, pCtx->msrSTAR,
1185 pszPrefix, pCtx->msrCSTAR,
1186 pszPrefix, pCtx->msrLSTAR,
1187 pszPrefix, pCtx->msrSFMASK,
1188 pszPrefix, pCtx->msrKERNELGSBASE);
1189
1190 break;
1191 }
1192}
1193
1194
1195/**
1196 * Display all cpu states and any other cpum info.
1197 *
1198 * @param pVM VM Handle.
1199 * @param pHlp The info helper functions.
1200 * @param pszArgs Arguments, ignored.
1201 */
1202static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1203{
1204 cpumR3InfoGuest(pVM, pHlp, pszArgs);
1205 cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);
1206 cpumR3InfoHyper(pVM, pHlp, pszArgs);
1207 cpumR3InfoHost(pVM, pHlp, pszArgs);
1208}
1209
1210
1211/**
1212 * Parses the info argument.
1213 *
1214 * The argument starts with 'verbose', 'terse' or 'default' and then
1215 * continues with the comment string.
1216 *
1217 * @param pszArgs The pointer to the argument string.
1218 * @param penmType Where to store the dump type request.
1219 * @param ppszComment Where to store the pointer to the comment string.
1220 */
1221static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
1222{
1223 if (!pszArgs)
1224 {
1225 *penmType = CPUMDUMPTYPE_DEFAULT;
1226 *ppszComment = "";
1227 }
1228 else
1229 {
1230 if (!strncmp(pszArgs, "verbose", sizeof("verbose") - 1))
1231 {
1232 pszArgs += sizeof("verbose") - 1;
1233 *penmType = CPUMDUMPTYPE_VERBOSE;
1234 }
1235 else if (!strncmp(pszArgs, "terse", sizeof("terse") - 1))
1236 {
1237 pszArgs += 5;
1238 *penmType = CPUMDUMPTYPE_TERSE;
1239 }
1240 else if (!strncmp(pszArgs, "default", sizeof("default") - 1))
1241 {
1242 pszArgs += 7;
1243 *penmType = CPUMDUMPTYPE_DEFAULT;
1244 }
1245 else
1246 *penmType = CPUMDUMPTYPE_DEFAULT;
1247 *ppszComment = RTStrStripL(pszArgs);
1248 }
1249}
1250
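/* Usage sketch: the first word selects the dump type and the rest, left-stripped,
 * becomes the comment string.
 *
 *     CPUMDUMPTYPE enmType;
 *     const char  *pszComment;
 *     cpumR3InfoParseArg("default after trap 0e", &enmType, &pszComment);
 *     // enmType == CPUMDUMPTYPE_DEFAULT, pszComment == "after trap 0e"
 */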
1251
1252/**
1253 * Display the guest cpu state.
1254 *
1255 * @param pVM VM Handle.
1256 * @param pHlp The info helper functions.
1257 * @param pszArgs Arguments, ignored.
1258 */
1259static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1260{
1261 CPUMDUMPTYPE enmType;
1262 const char *pszComment;
1263 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1264 pHlp->pfnPrintf(pHlp, "Guest CPUM state: %s\n", pszComment);
1265 cpumR3InfoOne(pVM, &pVM->cpum.s.Guest, CPUMCTX2CORE(&pVM->cpum.s.Guest), pHlp, enmType, "");
1266}
1267
1268/**
1269 * Display the current guest instruction
1270 *
1271 * @param pVM VM Handle.
1272 * @param pHlp The info helper functions.
1273 * @param pszArgs Arguments, ignored.
1274 */
1275static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1276{
1277 char szInstruction[256];
1278 int rc = DBGFR3DisasInstrCurrent(pVM, szInstruction, sizeof(szInstruction));
1279 if (VBOX_SUCCESS(rc))
1280 pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
1281}
1282
1283
1284/**
1285 * Display the hypervisor cpu state.
1286 *
1287 * @param pVM VM Handle.
1288 * @param pHlp The info helper functions.
1289 * @param pszArgs Arguments, ignored.
1290 */
1291static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1292{
1293 CPUMDUMPTYPE enmType;
1294 const char *pszComment;
1295 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1296 pHlp->pfnPrintf(pHlp, "Hypervisor CPUM state: %s\n", pszComment);
1297 cpumR3InfoOne(pVM, &pVM->cpum.s.Hyper, pVM->cpum.s.pHyperCoreR3, pHlp, enmType, ".");
1298 pHlp->pfnPrintf(pHlp, "CR4OrMask=%#x CR4AndMask=%#x\n", pVM->cpum.s.CR4.OrMask, pVM->cpum.s.CR4.AndMask);
1299}
1300
1301
1302/**
1303 * Display the host cpu state.
1304 *
1305 * @param pVM VM Handle.
1306 * @param pHlp The info helper functions.
1307 * @param pszArgs Arguments, ignored.
1308 */
1309static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1310{
1311 CPUMDUMPTYPE enmType;
1312 const char *pszComment;
1313 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1314 pHlp->pfnPrintf(pHlp, "Host CPUM state: %s\n", pszComment);
1315
1316 /*
1317 * Format the EFLAGS.
1318 */
1319 PCPUMHOSTCTX pCtx = &pVM->cpum.s.Host;
1320#if HC_ARCH_BITS == 32
1321 uint32_t efl = pCtx->eflags.u32;
1322#else
1323 uint64_t efl = pCtx->rflags;
1324#endif
1325 char szEFlags[80];
1326 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1327
1328 /*
1329 * Format the registers.
1330 */
1331#if HC_ARCH_BITS == 32
1332# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
1333 if (!(pCtx->efer & MSR_K6_EFER_LMA))
1334# endif
1335 {
1336 pHlp->pfnPrintf(pHlp,
1337 "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
1338 "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
1339 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
1340 "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
1341 "dr0=%08RX64 dr1=%08RX64 dr2=%08RX64 dr3=%08RX64 dr6=%08RX64 dr7=%08RX64\n"
1342 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1343 ,
1344 /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
1345 /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
1346 (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
1347 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
1348 pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
1349 (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, (RTSEL)pCtx->ldtr,
1350 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1351 }
1352# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
1353 else
1354# endif
1355#endif
1356#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
1357 {
1358 pHlp->pfnPrintf(pHlp,
1359 "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
1360 "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
1361 "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
1362 " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
1363 "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1364 "r14=%016RX64 r15=%016RX64\n"
1365 "iopl=%d %31s\n"
1366 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
1367 "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
1368 "cr4=%016RX64 ldtr=%04x tr=%04x\n"
1369 "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64\n"
1370 "dr3=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
1371 "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
1372 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1373 "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
1374 ,
1375 /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
1376 pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
1377 /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
1378 /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
1379 pCtx->r11, pCtx->r12, pCtx->r13,
1380 pCtx->r14, pCtx->r15,
1381 X86_EFL_GET_IOPL(efl), szEFlags,
1382 (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
1383 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
1384 pCtx->cr4, pCtx->ldtr, pCtx->tr,
1385 pCtx->dr0, pCtx->dr1, pCtx->dr2,
1386 pCtx->dr3, pCtx->dr6, pCtx->dr7,
1387 pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
1388 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
1389 pCtx->FSbase, pCtx->GSbase, pCtx->efer);
1390 }
1391#endif
1392}
1393
1394
1395/**
1396 * Get L1 cache / TLB associativity.
1397 */
1398static const char *getCacheAss(unsigned u, char *pszBuf)
1399{
1400 if (u == 0)
1401 return "res0 ";
1402 if (u == 1)
1403 return "direct";
1404 if (u >= 256)
1405 return "???";
1406
1407 RTStrPrintf(pszBuf, 16, "%d way", u);
1408 return pszBuf;
1409}
1410
1411
1412/**
1413 * Get L2 cache associativity.
1414 */
1415const char *getL2CacheAss(unsigned u)
1416{
1417 switch (u)
1418 {
1419 case 0: return "off ";
1420 case 1: return "direct";
1421 case 2: return "2 way ";
1422 case 3: return "res3 ";
1423 case 4: return "4 way ";
1424 case 5: return "res5 ";
1425 case 6: return "8 way "; case 7: return "res7 ";
1426 case 8: return "16 way";
1427 case 9: return "res9 ";
1428 case 10: return "res10 ";
1429 case 11: return "res11 ";
1430 case 12: return "res12 ";
1431 case 13: return "res13 ";
1432 case 14: return "res14 ";
1433 case 15: return "fully ";
1434 default:
1435 return "????";
1436 }
1437}
1438
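/* The encoding handled above comes from CPUID leaf 0x80000006 ECX on AMD CPUs;
 * a small decode sketch, with the bit layout as given in the AMD CPUID
 * specification (size in ECX[31:16], associativity in ECX[15:12], line size in
 * ECX[7:0]):
 *
 *     uint32_t uEAX, uEBX, uECX, uEDX;
 *     ASMCpuId(UINT32_C(0x80000006), &uEAX, &uEBX, &uECX, &uEDX);
 *     LogRel(("L2: %u KB, assoc=%s, line=%u bytes\n",
 *             uECX >> 16, getL2CacheAss((uECX >> 12) & 0xf), uECX & 0xff));
 */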
1439
1440/**
1441 * Display the guest CpuId leaves.
1442 *
1443 * @param pVM VM Handle.
1444 * @param pHlp The info helper functions.
1445 * @param pszArgs "terse", "default" or "verbose".
1446 */
1447static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1448{
1449 /*
1450 * Parse the argument.
1451 */
1452 unsigned iVerbosity = 1;
1453 if (pszArgs)
1454 {
1455 pszArgs = RTStrStripL(pszArgs);
1456 if (!strcmp(pszArgs, "terse"))
1457 iVerbosity--;
1458 else if (!strcmp(pszArgs, "verbose"))
1459 iVerbosity++;
1460 }
1461
1462 /*
1463 * Start cracking.
1464 */
1465 CPUMCPUID Host;
1466 CPUMCPUID Guest;
1467 unsigned cStdMax = pVM->cpum.s.aGuestCpuIdStd[0].eax;
1468
1469 pHlp->pfnPrintf(pHlp,
1470 " RAW Standard CPUIDs\n"
1471 " Function eax ebx ecx edx\n");
1472 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd); i++)
1473 {
1474 Guest = pVM->cpum.s.aGuestCpuIdStd[i];
1475 ASMCpuId_Idx_ECX(i, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1476
1477 pHlp->pfnPrintf(pHlp,
1478 "Gst: %08x %08x %08x %08x %08x%s\n"
1479 "Hst: %08x %08x %08x %08x\n",
1480 i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
1481 i <= cStdMax ? "" : "*",
1482 Host.eax, Host.ebx, Host.ecx, Host.edx);
1483 }
1484
1485 /*
1486 * If verbose, decode it.
1487 */
1488 if (iVerbosity)
1489 {
1490 Guest = pVM->cpum.s.aGuestCpuIdStd[0];
1491 pHlp->pfnPrintf(pHlp,
1492 "Name: %.04s%.04s%.04s\n"
1493 "Supports: 0-%x\n",
1494 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
1495 }
1496
1497 /*
1498 * Get Features.
1499 */
1500 bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdStd[0].ebx,
1501 pVM->cpum.s.aGuestCpuIdStd[0].ecx,
1502 pVM->cpum.s.aGuestCpuIdStd[0].edx);
1503 if (cStdMax >= 1 && iVerbosity)
1504 {
1505 Guest = pVM->cpum.s.aGuestCpuIdStd[1];
1506 uint32_t uEAX = Guest.eax;
1507
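        /* CPUID leaf 1 EAX layout: stepping in bits 3:0, model in 7:4, family in 11:8,
           extended model in 19:16 and extended family in 27:20.  EBX holds the brand
           index in bits 7:0, the CLFLUSH line size (in 8-byte units) in 15:8, the
           logical processor count in 23:16 and the initial APIC ID in 31:24, which is
           what the shifts and masks below pick apart. */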
1508 pHlp->pfnPrintf(pHlp,
1509 "Family: %d \tExtended: %d \tEffective: %d\n"
1510 "Model: %d \tExtended: %d \tEffective: %d\n"
1511 "Stepping: %d\n"
1512 "APIC ID: %#04x\n"
1513 "Logical CPUs: %d\n"
1514 "CLFLUSH Size: %d\n"
1515 "Brand ID: %#04x\n",
1516 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
1517 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
1518 ASMGetCpuStepping(uEAX),
1519 (Guest.ebx >> 24) & 0xff,
1520 (Guest.ebx >> 16) & 0xff,
1521 (Guest.ebx >> 8) & 0xff,
1522 (Guest.ebx >> 0) & 0xff);
1523 if (iVerbosity == 1)
1524 {
1525 uint32_t uEDX = Guest.edx;
1526 pHlp->pfnPrintf(pHlp, "Features EDX: ");
1527 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
1528 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
1529 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
1530 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
1531 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
1532 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
1533 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
1534 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
1535 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
1536 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
1537 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
1538 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SEP");
1539 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
1540 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
1541 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
1542 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
1543 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
1544 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
1545 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " PSN");
1546 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " CLFSH");
1547 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " 20");
1548 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " DS");
1549 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ACPI");
1550 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
1551 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
1552 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " SSE");
1553 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " SSE2");
1554 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " SS");
1555 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " HTT");
1556 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " TM");
1557 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " 30");
1558 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " PBE");
1559 pHlp->pfnPrintf(pHlp, "\n");
1560
1561 uint32_t uECX = Guest.ecx;
1562 pHlp->pfnPrintf(pHlp, "Features ECX: ");
1563 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " SSE3");
1564 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " 1");
1565 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " 2");
1566 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " MONITOR");
1567 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " DS-CPL");
1568 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " VMX");
1569 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " 6");
1570 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " EST");
1571 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " TM2");
1572 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " 9");
1573 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " CNXT-ID");
1574 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " 11");
1575 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " 12");
1576 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " CX16");
1577 for (unsigned iBit = 14; iBit < 32; iBit++)
1578 if (uECX & RT_BIT(iBit))
1579 pHlp->pfnPrintf(pHlp, " %d", iBit);
1580 pHlp->pfnPrintf(pHlp, "\n");
1581 }
1582 else
1583 {
1584 ASMCpuId(1, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1585
1586 X86CPUIDFEATEDX EdxHost = *(PX86CPUIDFEATEDX)&Host.edx;
1587 X86CPUIDFEATECX EcxHost = *(PX86CPUIDFEATECX)&Host.ecx;
1588 X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.edx;
1589 X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.ecx;
1590
1591 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
1592 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", EdxGuest.u1FPU, EdxHost.u1FPU);
1593 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", EdxGuest.u1VME, EdxHost.u1VME);
1594 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", EdxGuest.u1DE, EdxHost.u1DE);
1595 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", EdxGuest.u1PSE, EdxHost.u1PSE);
1596 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", EdxGuest.u1TSC, EdxHost.u1TSC);
1597 pHlp->pfnPrintf(pHlp, "MSR - Model Specific Registers = %d (%d)\n", EdxGuest.u1MSR, EdxHost.u1MSR);
1598 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", EdxGuest.u1PAE, EdxHost.u1PAE);
1599 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", EdxGuest.u1MCE, EdxHost.u1MCE);
1600 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", EdxGuest.u1CX8, EdxHost.u1CX8);
1601 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", EdxGuest.u1APIC, EdxHost.u1APIC);
1602 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EdxGuest.u1Reserved1, EdxHost.u1Reserved1);
1603 pHlp->pfnPrintf(pHlp, "SEP - SYSENTER and SYSEXIT = %d (%d)\n", EdxGuest.u1SEP, EdxHost.u1SEP);
1604 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", EdxGuest.u1MTRR, EdxHost.u1MTRR);
1605 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", EdxGuest.u1PGE, EdxHost.u1PGE);
1606 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", EdxGuest.u1MCA, EdxHost.u1MCA);
1607 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", EdxGuest.u1CMOV, EdxHost.u1CMOV);
1608 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", EdxGuest.u1PAT, EdxHost.u1PAT);
1609            pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extension    = %d (%d)\n", EdxGuest.u1PSE36, EdxHost.u1PSE36);
1610 pHlp->pfnPrintf(pHlp, "PSN - Processor Serial Number = %d (%d)\n", EdxGuest.u1PSN, EdxHost.u1PSN);
1611 pHlp->pfnPrintf(pHlp, "CLFSH - CLFLUSH Instruction. = %d (%d)\n", EdxGuest.u1CLFSH, EdxHost.u1CLFSH);
1612 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EdxGuest.u1Reserved2, EdxHost.u1Reserved2);
1613 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", EdxGuest.u1DS, EdxHost.u1DS);
1614 pHlp->pfnPrintf(pHlp, "ACPI - Thermal Mon. & Soft. Clock Ctrl.= %d (%d)\n", EdxGuest.u1ACPI, EdxHost.u1ACPI);
1615 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", EdxGuest.u1MMX, EdxHost.u1MMX);
1616 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", EdxGuest.u1FXSR, EdxHost.u1FXSR);
1617 pHlp->pfnPrintf(pHlp, "SSE - SSE Support = %d (%d)\n", EdxGuest.u1SSE, EdxHost.u1SSE);
1618 pHlp->pfnPrintf(pHlp, "SSE2 - SSE2 Support = %d (%d)\n", EdxGuest.u1SSE2, EdxHost.u1SSE2);
1619 pHlp->pfnPrintf(pHlp, "SS - Self Snoop = %d (%d)\n", EdxGuest.u1SS, EdxHost.u1SS);
1620            pHlp->pfnPrintf(pHlp, "HTT - Hyper-Threading Technology       = %d (%d)\n", EdxGuest.u1HTT, EdxHost.u1HTT);
1621 pHlp->pfnPrintf(pHlp, "TM - Thermal Monitor = %d (%d)\n", EdxGuest.u1TM, EdxHost.u1TM);
1622 pHlp->pfnPrintf(pHlp, "30 - Reserved = %d (%d)\n", EdxGuest.u1Reserved3, EdxHost.u1Reserved3);
1623 pHlp->pfnPrintf(pHlp, "PBE - Pending Break Enable = %d (%d)\n", EdxGuest.u1PBE, EdxHost.u1PBE);
1624
1625 pHlp->pfnPrintf(pHlp, "Supports SSE3 or not = %d (%d)\n", EcxGuest.u1SSE3, EcxHost.u1SSE3);
1626 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EcxGuest.u2Reserved1, EcxHost.u2Reserved1);
1627 pHlp->pfnPrintf(pHlp, "Supports MONITOR/MWAIT = %d (%d)\n", EcxGuest.u1Monitor, EcxHost.u1Monitor);
1628 pHlp->pfnPrintf(pHlp, "CPL-DS - CPL Qualified Debug Store = %d (%d)\n", EcxGuest.u1CPLDS, EcxHost.u1CPLDS);
1629 pHlp->pfnPrintf(pHlp, "VMX - Virtual Machine Technology = %d (%d)\n", EcxGuest.u1VMX, EcxHost.u1VMX);
1630 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EcxGuest.u1Reserved2, EcxHost.u1Reserved2);
1631 pHlp->pfnPrintf(pHlp, "Enhanced SpeedStep Technology = %d (%d)\n", EcxGuest.u1EST, EcxHost.u1EST);
1632            pHlp->pfnPrintf(pHlp, "Thermal Monitor 2                      = %d (%d)\n", EcxGuest.u1TM2, EcxHost.u1TM2);
1633 pHlp->pfnPrintf(pHlp, "Supports Supplemental SSE3 or not = %d (%d)\n", EcxGuest.u1SSSE3, EcxHost.u1SSSE3);
1634 pHlp->pfnPrintf(pHlp, "L1 Context ID = %d (%d)\n", EcxGuest.u1CNTXID, EcxHost.u1CNTXID);
1635 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u2Reserved4, EcxHost.u2Reserved4);
1636 pHlp->pfnPrintf(pHlp, "CMPXCHG16B = %d (%d)\n", EcxGuest.u1CX16, EcxHost.u1CX16);
1637 pHlp->pfnPrintf(pHlp, "xTPR Update Control = %d (%d)\n", EcxGuest.u1TPRUpdate, EcxHost.u1TPRUpdate);
1638 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u17Reserved5, EcxHost.u17Reserved5);
1639 }
1640 }
1641 if (cStdMax >= 2 && iVerbosity)
1642 {
1643 /** @todo */
1644 }
1645
1646 /*
1647 * Extended.
1648 * Implemented after AMD specs.
1649 */
1650 unsigned cExtMax = pVM->cpum.s.aGuestCpuIdExt[0].eax & 0xffff;
1651
1652 pHlp->pfnPrintf(pHlp,
1653 "\n"
1654 " RAW Extended CPUIDs\n"
1655 " Function eax ebx ecx edx\n");
1656 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt); i++)
1657 {
1658 Guest = pVM->cpum.s.aGuestCpuIdExt[i];
1659 ASMCpuId(0x80000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1660
1661 pHlp->pfnPrintf(pHlp,
1662 "Gst: %08x %08x %08x %08x %08x%s\n"
1663 "Hst: %08x %08x %08x %08x\n",
1664 0x80000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
1665 i <= cExtMax ? "" : "*",
1666 Host.eax, Host.ebx, Host.ecx, Host.edx);
1667 }
1668
1669 /*
1670 * Understandable output
1671 */
1672 if (iVerbosity && cExtMax >= 0)
1673 {
1674 Guest = pVM->cpum.s.aGuestCpuIdExt[0];
1675 pHlp->pfnPrintf(pHlp,
1676 "Ext Name: %.4s%.4s%.4s\n"
1677 "Ext Supports: 0x80000000-%#010x\n",
1678 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
1679 }
1680
1681 if (iVerbosity && cExtMax >= 1)
1682 {
1683 Guest = pVM->cpum.s.aGuestCpuIdExt[1];
1684 uint32_t uEAX = Guest.eax;
1685 pHlp->pfnPrintf(pHlp,
1686 "Family: %d \tExtended: %d \tEffective: %d\n"
1687 "Model: %d \tExtended: %d \tEffective: %d\n"
1688 "Stepping: %d\n"
1689 "Brand ID: %#05x\n",
1690 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
1691 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
1692 ASMGetCpuStepping(uEAX),
1693 Guest.ebx & 0xfff);
1694
1695 if (iVerbosity == 1)
1696 {
1697 uint32_t uEDX = Guest.edx;
1698 pHlp->pfnPrintf(pHlp, "Features EDX: ");
1699 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
1700 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
1701 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
1702 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
1703 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
1704 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
1705 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
1706 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
1707 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
1708 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
1709 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
1710 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SCR");
1711 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
1712 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
1713 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
1714 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
1715 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
1716 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
1717 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " 18");
1718 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " 19");
1719 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " NX");
1720 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " 21");
1721 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ExtMMX");
1722 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
1723 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
1724 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " FastFXSR");
1725 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " Page1GB");
1726 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " RDTSCP");
1727 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " 28");
1728 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " LongMode");
1729 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " Ext3DNow");
1730 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " 3DNow");
1731 pHlp->pfnPrintf(pHlp, "\n");
1732
1733 uint32_t uECX = Guest.ecx;
1734 pHlp->pfnPrintf(pHlp, "Features ECX: ");
1735 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " LAHF/SAHF");
1736 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " CMPL");
1737 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " SVM");
1738 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " ExtAPIC");
1739 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " CR8L");
1740 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " ABM");
1741 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " SSE4A");
1742 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MISALNSSE");
1743 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " 3DNOWPRF");
1744 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " OSVW");
1745 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " IBS");
1746 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SSE5");
1747 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " SKINIT");
1748 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " WDT");
1749            for (unsigned iBit = 14; iBit < 32; iBit++)
1750 if (uECX & RT_BIT(iBit))
1751 pHlp->pfnPrintf(pHlp, " %d", iBit);
1752 pHlp->pfnPrintf(pHlp, "\n");
1753 }
1754 else
1755 {
1756 ASMCpuId(0x80000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1757
1758 uint32_t uEdxGst = Guest.edx;
1759 uint32_t uEdxHst = Host.edx;
1760 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
1761 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
1762 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
1763 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
1764 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
1765 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
1766 pHlp->pfnPrintf(pHlp, "MSR - K86 Model Specific Registers = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
1767 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
1768 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
1769 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
1770 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
1771 pHlp->pfnPrintf(pHlp, "10 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
1772 pHlp->pfnPrintf(pHlp, "SEP - SYSCALL and SYSRET = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
1773 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
1774 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
1775 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
1776 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
1777 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
1778            pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extension    = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
1779 pHlp->pfnPrintf(pHlp, "18 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
1780 pHlp->pfnPrintf(pHlp, "19 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
1781 pHlp->pfnPrintf(pHlp, "NX - No-Execute Page Protection = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
1782 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
1783 pHlp->pfnPrintf(pHlp, "AXMMX - AMD Extensions to MMX Instr. = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
1784 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
1785 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
1786 pHlp->pfnPrintf(pHlp, "25 - AMD fast FXSAVE and FXRSTOR Instr.= %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
1787 pHlp->pfnPrintf(pHlp, "26 - 1 GB large page support = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
1788 pHlp->pfnPrintf(pHlp, "27 - RDTSCP instruction = %d (%d)\n", !!(uEdxGst & RT_BIT(27)), !!(uEdxHst & RT_BIT(27)));
1789 pHlp->pfnPrintf(pHlp, "28 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(28)), !!(uEdxHst & RT_BIT(28)));
1790 pHlp->pfnPrintf(pHlp, "29 - AMD Long Mode = %d (%d)\n", !!(uEdxGst & RT_BIT(29)), !!(uEdxHst & RT_BIT(29)));
1791 pHlp->pfnPrintf(pHlp, "30 - AMD Extensions to 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(30)), !!(uEdxHst & RT_BIT(30)));
1792 pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(31)), !!(uEdxHst & RT_BIT(31)));
1793
1794 uint32_t uEcxGst = Guest.ecx;
1795 uint32_t uEcxHst = Host.ecx;
1796 pHlp->pfnPrintf(pHlp, "LahfSahf - LAHF/SAHF in 64-bit mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 0)), !!(uEcxHst & RT_BIT( 0)));
1797 pHlp->pfnPrintf(pHlp, "CmpLegacy - Core MP legacy mode (depr) = %d (%d)\n", !!(uEcxGst & RT_BIT( 1)), !!(uEcxHst & RT_BIT( 1)));
1798 pHlp->pfnPrintf(pHlp, "SVM - AMD VM Extensions = %d (%d)\n", !!(uEcxGst & RT_BIT( 2)), !!(uEcxHst & RT_BIT( 2)));
1799 pHlp->pfnPrintf(pHlp, "APIC registers starting at 0x400 = %d (%d)\n", !!(uEcxGst & RT_BIT( 3)), !!(uEcxHst & RT_BIT( 3)));
1800 pHlp->pfnPrintf(pHlp, "AltMovCR8 - LOCK MOV CR0 means MOV CR8 = %d (%d)\n", !!(uEcxGst & RT_BIT( 4)), !!(uEcxHst & RT_BIT( 4)));
1801 pHlp->pfnPrintf(pHlp, "Advanced bit manipulation = %d (%d)\n", !!(uEcxGst & RT_BIT( 5)), !!(uEcxHst & RT_BIT( 5)));
1802 pHlp->pfnPrintf(pHlp, "SSE4A instruction support = %d (%d)\n", !!(uEcxGst & RT_BIT( 6)), !!(uEcxHst & RT_BIT( 6)));
1803 pHlp->pfnPrintf(pHlp, "Misaligned SSE mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 7)), !!(uEcxHst & RT_BIT( 7)));
1804 pHlp->pfnPrintf(pHlp, "PREFETCH and PREFETCHW instruction = %d (%d)\n", !!(uEcxGst & RT_BIT( 8)), !!(uEcxHst & RT_BIT( 8)));
1805 pHlp->pfnPrintf(pHlp, "OS visible workaround = %d (%d)\n", !!(uEcxGst & RT_BIT( 9)), !!(uEcxHst & RT_BIT( 9)));
1806 pHlp->pfnPrintf(pHlp, "Instruction based sampling = %d (%d)\n", !!(uEcxGst & RT_BIT(10)), !!(uEcxHst & RT_BIT(10)));
1807 pHlp->pfnPrintf(pHlp, "SSE5 support = %d (%d)\n", !!(uEcxGst & RT_BIT(11)), !!(uEcxHst & RT_BIT(11)));
1808 pHlp->pfnPrintf(pHlp, "SKINIT, STGI, and DEV support = %d (%d)\n", !!(uEcxGst & RT_BIT(12)), !!(uEcxHst & RT_BIT(12)));
1809 pHlp->pfnPrintf(pHlp, "Watchdog timer support. = %d (%d)\n", !!(uEcxGst & RT_BIT(13)), !!(uEcxHst & RT_BIT(13)));
1810 pHlp->pfnPrintf(pHlp, "31:14 - Reserved = %#x (%#x)\n", uEcxGst >> 14, uEcxHst >> 14);
1811 }
1812 }
1813
1814 if (iVerbosity && cExtMax >= 2)
1815 {
1816 char szString[4*4*3+1] = {0};
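        /* Leaves 0x80000002..0x80000004 each return 16 bytes of the processor brand
           string, 48 characters in total, which is what the 4*4*3 above accounts for. */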
1817 uint32_t *pu32 = (uint32_t *)szString;
1818 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].eax;
1819 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ebx;
1820 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ecx;
1821 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].edx;
1822 if (cExtMax >= 3)
1823 {
1824 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].eax;
1825 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ebx;
1826 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ecx;
1827 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].edx;
1828 }
1829 if (cExtMax >= 4)
1830 {
1831 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].eax;
1832 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ebx;
1833 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ecx;
1834 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].edx;
1835 }
1836 pHlp->pfnPrintf(pHlp, "Full Name: %s\n", szString);
1837 }
1838
1839 if (iVerbosity && cExtMax >= 5)
1840 {
1841 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[5].eax;
1842 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[5].ebx;
1843 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[5].ecx;
1844 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[5].edx;
1845 char sz1[32];
1846 char sz2[32];
1847
1848 pHlp->pfnPrintf(pHlp,
1849 "TLB 2/4M Instr/Uni: %s %3d entries\n"
1850 "TLB 2/4M Data: %s %3d entries\n",
1851 getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
1852 getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
1853 pHlp->pfnPrintf(pHlp,
1854 "TLB 4K Instr/Uni: %s %3d entries\n"
1855 "TLB 4K Data: %s %3d entries\n",
1856 getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
1857 getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
1858 pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
1859 "L1 Instr Cache Lines Per Tag: %d\n"
1860 "L1 Instr Cache Associativity: %s\n"
1861 "L1 Instr Cache Size: %d KB\n",
1862 (uEDX >> 0) & 0xff,
1863 (uEDX >> 8) & 0xff,
1864 getCacheAss((uEDX >> 16) & 0xff, sz1),
1865 (uEDX >> 24) & 0xff);
1866 pHlp->pfnPrintf(pHlp,
1867 "L1 Data Cache Line Size: %d bytes\n"
1868 "L1 Data Cache Lines Per Tag: %d\n"
1869 "L1 Data Cache Associativity: %s\n"
1870 "L1 Data Cache Size: %d KB\n",
1871 (uECX >> 0) & 0xff,
1872 (uECX >> 8) & 0xff,
1873 getCacheAss((uECX >> 16) & 0xff, sz1),
1874 (uECX >> 24) & 0xff);
1875 }
1876
1877 if (iVerbosity && cExtMax >= 6)
1878 {
1879 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[6].eax;
1880 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[6].ebx;
1881 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[6].edx;
1882
1883 pHlp->pfnPrintf(pHlp,
1884 "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
1885 "L2 TLB 2/4M Data: %s %4d entries\n",
1886 getL2CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
1887 getL2CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
1888 pHlp->pfnPrintf(pHlp,
1889 "L2 TLB 4K Instr/Uni: %s %4d entries\n"
1890 "L2 TLB 4K Data: %s %4d entries\n",
1891 getL2CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
1892 getL2CacheAss((uEBX >> 28) & 0xf), (uEBX >> 16) & 0xfff);
1893 pHlp->pfnPrintf(pHlp,
1894 "L2 Cache Line Size: %d bytes\n"
1895 "L2 Cache Lines Per Tag: %d\n"
1896 "L2 Cache Associativity: %s\n"
1897 "L2 Cache Size: %d KB\n",
1898 (uEDX >> 0) & 0xff,
1899 (uEDX >> 8) & 0xf,
1900 getL2CacheAss((uEDX >> 12) & 0xf),
1901 (uEDX >> 16) & 0xffff);
1902 }
1903
1904 if (iVerbosity && cExtMax >= 7)
1905 {
1906 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[7].edx;
1907
1908 pHlp->pfnPrintf(pHlp, "APM Features: ");
1909 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " TS");
1910 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " FID");
1911 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " VID");
1912 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " TTP");
1913 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TM");
1914 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " STC");
1915 for (unsigned iBit = 6; iBit < 32; iBit++)
1916 if (uEDX & RT_BIT(iBit))
1917 pHlp->pfnPrintf(pHlp, " %d", iBit);
1918 pHlp->pfnPrintf(pHlp, "\n");
1919 }
1920
1921 if (iVerbosity && cExtMax >= 8)
1922 {
1923 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[8].eax;
1924 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[8].ecx;
1925
1926 pHlp->pfnPrintf(pHlp,
1927 "Physical Address Width: %d bits\n"
1928 "Virtual Address Width: %d bits\n",
1929 (uEAX >> 0) & 0xff,
1930 (uEAX >> 8) & 0xff);
1931 pHlp->pfnPrintf(pHlp,
1932 "Physical Core Count: %d\n",
1933 (uECX >> 0) & 0xff);
1934 }
1935
1936
1937 /*
1938 * Centaur.
1939 */
1940 unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdCentaur[0].eax & 0xffff;
1941
1942 pHlp->pfnPrintf(pHlp,
1943 "\n"
1944 " RAW Centaur CPUIDs\n"
1945 " Function eax ebx ecx edx\n");
1946 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur); i++)
1947 {
1948 Guest = pVM->cpum.s.aGuestCpuIdCentaur[i];
1949 ASMCpuId(0xc0000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1950
1951 pHlp->pfnPrintf(pHlp,
1952 "Gst: %08x %08x %08x %08x %08x%s\n"
1953 "Hst: %08x %08x %08x %08x\n",
1954 0xc0000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
1955 i <= cCentaurMax ? "" : "*",
1956 Host.eax, Host.ebx, Host.ecx, Host.edx);
1957 }
1958
1959 /*
1960 * Understandable output
1961 */
1962 if (iVerbosity && cCentaurMax >= 0)
1963 {
1964 Guest = pVM->cpum.s.aGuestCpuIdCentaur[0];
1965 pHlp->pfnPrintf(pHlp,
1966 "Centaur Supports: 0xc0000000-%#010x\n",
1967 Guest.eax);
1968 }
1969
1970 if (iVerbosity && cCentaurMax >= 1)
1971 {
1972 ASMCpuId(0xc0000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1973        uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdCentaur[1].edx;
1974 uint32_t uEdxHst = Host.edx;
1975
1976 if (iVerbosity == 1)
1977 {
1978 pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
1979 if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
1980 if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
1981 if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
1982 if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
1983 if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
1984 if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
1985 if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
1986 if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
1987 /* possibly indicating MM/HE and MM/HE-E on older chips... */
1988 if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
1989 if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
1990 if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
1991 if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
1992 if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
1993 if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
1994 for (unsigned iBit = 14; iBit < 32; iBit++)
1995 if (uEdxGst & RT_BIT(iBit))
1996 pHlp->pfnPrintf(pHlp, " %d", iBit);
1997 pHlp->pfnPrintf(pHlp, "\n");
1998 }
1999 else
2000 {
2001 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
2002 pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
2003 pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
2004 pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
2005 pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
2006 pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
2007 pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
2008 pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
2009 pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
2010 /* possibly indicating MM/HE and MM/HE-E on older chips... */
2011 pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
2012 pHlp->pfnPrintf(pHlp, "ACE2-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
2013 pHlp->pfnPrintf(pHlp, "PHE - Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
2014 pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
2015 pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
2016 pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
2017 for (unsigned iBit = 14; iBit < 32; iBit++)
2018 if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
2019                    pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", iBit, !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
2020 pHlp->pfnPrintf(pHlp, "\n");
2021 }
2022 }
2023}
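/* This is the handler behind the DBGF "cpuid" info item; it is presumably registered
 * during CPUM initialisation via DBGFR3InfoRegisterInternal(), so the dump can be
 * requested through the normal DBGF info interfaces (e.g. an "info cpuid verbose"
 * debugger command). */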
2024
2025
2026/**
2027 * Structure used when disassembling instructions in DBGF.
2028 * This is used so the reader function can get the stuff it needs.
2029 */
2030typedef struct CPUMDISASSTATE
2031{
2032 /** Pointer to the CPU structure. */
2033 PDISCPUSTATE pCpu;
2034 /** The VM handle. */
2035 PVM pVM;
2036    /** Pointer to the first byte in the segment. */
2037 RTGCUINTPTR GCPtrSegBase;
2038 /** Pointer to the byte after the end of the segment. (might have wrapped!) */
2039 RTGCUINTPTR GCPtrSegEnd;
2040 /** The size of the segment minus 1. */
2041 RTGCUINTPTR cbSegLimit;
2042 /** Pointer to the current page - HC Ptr. */
2043 void const *pvPageHC;
2044 /** Pointer to the current page - GC Ptr. */
2045 RTGCPTR pvPageGC;
2046 /** The lock information that PGMPhysReleasePageMappingLock needs. */
2047 PGMPAGEMAPLOCK PageMapLock;
2048 /** Whether the PageMapLock is valid or not. */
2049 bool fLocked;
2050    /** 64-bit mode or not. */
2051 bool f64Bits;
2052} CPUMDISASSTATE, *PCPUMDISASSTATE;
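/* How the pieces fit together: CPUMR3DisasmInstrCPU() below fills in a CPUMDISASSTATE
 * on its stack, stores its address in pCpu->apvUserData[0] and installs
 * cpumR3DisasInstrRead() as the pfnReadBytes callback, so the disassembler fetches
 * guest code bytes one page at a time, relative to the CS base. */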
2053
2054
2055/**
2056 * Instruction reader.
2057 *
2058 * @returns VBox status code.
2059 * @param PtrSrc Address to read from.
2060 *                      In our case this is relative to the segment base of the CPUMDISASSTATE stored in apvUserData[0] of uDisCpu.
2061 * @param pu8Dst Where to store the bytes.
2062 * @param cbRead Number of bytes to read.
2063 * @param uDisCpu Pointer to the disassembler cpu state.
2064 *                      In this context it is always a pointer to the DISCPUSTATE whose apvUserData[0] holds our CPUMDISASSTATE.
2065 */
2066static DECLCALLBACK(int) cpumR3DisasInstrRead(RTUINTPTR PtrSrc, uint8_t *pu8Dst, unsigned cbRead, void *uDisCpu)
2067{
2068 PDISCPUSTATE pCpu = (PDISCPUSTATE)uDisCpu;
2069 PCPUMDISASSTATE pState = (PCPUMDISASSTATE)pCpu->apvUserData[0];
2070 Assert(cbRead > 0);
2071 for (;;)
2072 {
2073 RTGCUINTPTR GCPtr = PtrSrc + pState->GCPtrSegBase;
2074
2075 /* Need to update the page translation? */
2076 if ( !pState->pvPageHC
2077 || (GCPtr >> PAGE_SHIFT) != (pState->pvPageGC >> PAGE_SHIFT))
2078 {
2079 int rc = VINF_SUCCESS;
2080
2081 /* translate the address */
2082 pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
2083 if (MMHyperIsInsideArea(pState->pVM, pState->pvPageGC))
2084 {
2085 pState->pvPageHC = MMHyperGC2HC(pState->pVM, pState->pvPageGC);
2086 if (!pState->pvPageHC)
2087 rc = VERR_INVALID_POINTER;
2088 }
2089 else
2090 {
2091 /* Release mapping lock previously acquired. */
2092 if (pState->fLocked)
2093 PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
2094 rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVM, pState->pvPageGC, &pState->pvPageHC, &pState->PageMapLock);
2095 pState->fLocked = RT_SUCCESS_NP(rc);
2096 }
2097 if (VBOX_FAILURE(rc))
2098 {
2099 pState->pvPageHC = NULL;
2100 return rc;
2101 }
2102 }
2103
2104        /* check the segment limit */
2105 if (!pState->f64Bits && PtrSrc > pState->cbSegLimit)
2106 return VERR_OUT_OF_SELECTOR_BOUNDS;
2107
2108 /* calc how much we can read */
2109 uint32_t cb = PAGE_SIZE - (GCPtr & PAGE_OFFSET_MASK);
2110 if (!pState->f64Bits)
2111 {
2112 RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
2113 if (cb > cbSeg && cbSeg)
2114 cb = cbSeg;
2115 }
2116 if (cb > cbRead)
2117 cb = cbRead;
2118
2119 /* read and advance */
2120 memcpy(pu8Dst, (char *)pState->pvPageHC + (GCPtr & PAGE_OFFSET_MASK), cb);
2121 cbRead -= cb;
2122 if (!cbRead)
2123 return VINF_SUCCESS;
2124 pu8Dst += cb;
2125 PtrSrc += cb;
2126 }
2127}
2128
2129
2130/**
2131 * Disassemble an instruction and return the information in the provided structure.
2132 *
2133 * @returns VBox status code.
2134 * @param pVM VM Handle
2135 * @param pCtx CPU context
2136 * @param GCPtrPC Program counter (relative to CS) to disassemble from.
2137 * @param pCpu Disassembly state
2138 * @param pszPrefix String prefix for logging (debug only)
2139 *
2140 */
2141CPUMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PCPUMCTX pCtx, RTGCPTR GCPtrPC, PDISCPUSTATE pCpu, const char *pszPrefix)
2142{
2143 CPUMDISASSTATE State;
2144 int rc;
2145
2146 const PGMMODE enmMode = PGMGetGuestMode(pVM);
2147 State.pCpu = pCpu;
2148 State.pvPageGC = 0;
2149 State.pvPageHC = NULL;
2150 State.pVM = pVM;
2151 State.fLocked = false;
2152 State.f64Bits = false;
2153
2154 /*
2155 * Get selector information.
2156 */
2157 if ( (pCtx->cr0 & X86_CR0_PE)
2158 && pCtx->eflags.Bits.u1VM == 0)
2159 {
2160 if (CPUMAreHiddenSelRegsValid(pVM))
2161 {
2162 State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->csHid.Attr.n.u1Long;
2163 State.GCPtrSegBase = pCtx->csHid.u64Base;
2164 State.GCPtrSegEnd = pCtx->csHid.u32Limit + 1 + (RTGCUINTPTR)pCtx->csHid.u64Base;
2165 State.cbSegLimit = pCtx->csHid.u32Limit;
2166 pCpu->mode = (State.f64Bits)
2167 ? CPUMODE_64BIT
2168 : pCtx->csHid.Attr.n.u1DefBig
2169 ? CPUMODE_32BIT
2170 : CPUMODE_16BIT;
2171 }
2172 else
2173 {
2174 SELMSELINFO SelInfo;
2175
2176 rc = SELMR3GetShadowSelectorInfo(pVM, pCtx->cs, &SelInfo);
2177 if (!VBOX_SUCCESS(rc))
2178 {
2179 AssertMsgFailed(("SELMR3GetShadowSelectorInfo failed for %04X:%VGv rc=%d\n", pCtx->cs, GCPtrPC, rc));
2180 return rc;
2181 }
2182
2183 /*
2184 * Validate the selector.
2185 */
2186 rc = SELMSelInfoValidateCS(&SelInfo, pCtx->ss);
2187 if (!VBOX_SUCCESS(rc))
2188 {
2189 AssertMsgFailed(("SELMSelInfoValidateCS failed for %04X:%VGv rc=%d\n", pCtx->cs, GCPtrPC, rc));
2190 return rc;
2191 }
2192 State.GCPtrSegBase = SelInfo.GCPtrBase;
2193 State.GCPtrSegEnd = SelInfo.cbLimit + 1 + (RTGCUINTPTR)SelInfo.GCPtrBase;
2194 State.cbSegLimit = SelInfo.cbLimit;
2195 pCpu->mode = SelInfo.Raw.Gen.u1DefBig ? CPUMODE_32BIT : CPUMODE_16BIT;
2196 }
2197 }
2198 else
2199 {
2200 /* real or V86 mode */
2201 pCpu->mode = CPUMODE_16BIT;
2202 State.GCPtrSegBase = pCtx->cs * 16;
2203 State.GCPtrSegEnd = 0xFFFFFFFF;
2204 State.cbSegLimit = 0xFFFFFFFF;
2205 }
2206
2207 /*
2208 * Disassemble the instruction.
2209 */
2210 pCpu->pfnReadBytes = cpumR3DisasInstrRead;
2211 pCpu->apvUserData[0] = &State;
2212
2213 uint32_t cbInstr;
2214#ifndef LOG_ENABLED
2215 rc = DISInstr(pCpu, GCPtrPC, 0, &cbInstr, NULL);
2216 if (VBOX_SUCCESS(rc))
2217 {
2218#else
2219 char szOutput[160];
2220 rc = DISInstr(pCpu, GCPtrPC, 0, &cbInstr, &szOutput[0]);
2221 if (VBOX_SUCCESS(rc))
2222 {
2223 /* log it */
2224 if (pszPrefix)
2225 Log(("%s: %s", pszPrefix, szOutput));
2226 else
2227 Log(("%s", szOutput));
2228#endif
2229 rc = VINF_SUCCESS;
2230 }
2231 else
2232 Log(("CPUMR3DisasmInstrCPU: DISInstr failed for %04X:%VGv rc=%Vrc\n", pCtx->cs, GCPtrPC, rc));
2233
2234 /* Release mapping lock acquired in cpumR3DisasInstrRead. */
2235 if (State.fLocked)
2236 PGMPhysReleasePageMappingLock(pVM, &State.PageMapLock);
2237
2238 return rc;
2239}
2240
2241#ifdef DEBUG
2242
2243/**
2244 * Disassemble an instruction and dump it to the log
2245 *
2246 * @returns VBox status code.
2247 * @param pVM VM Handle
2248 * @param pCtx CPU context
2249 * @param pc GC instruction pointer
2250 * @param prefix String prefix for logging
2251 * @deprecated Use DBGFR3DisasInstrCurrentLog().
2252 *
2253 */
2254CPUMR3DECL(void) CPUMR3DisasmInstr(PVM pVM, PCPUMCTX pCtx, RTGCPTR pc, char *prefix)
2255{
2256 DISCPUSTATE cpu;
2257
2258 CPUMR3DisasmInstrCPU(pVM, pCtx, pc, &cpu, prefix);
2259}
2260
2261/**
2262 * Disassemble an instruction and dump it to the log
2263 *
2264 * @returns VBox status code.
2265 * @param pVM VM Handle
2266 * @param pCtx CPU context
2267 * @param pc GC instruction pointer
2268 * @param prefix String prefix for logging
2269 * @param   nrInstructions  Number of instructions to disassemble.
2270 *
2271 */
2272CPUMR3DECL(void) CPUMR3DisasmBlock(PVM pVM, PCPUMCTX pCtx, RTGCPTR pc, char *prefix, int nrInstructions)
2273{
2274    for (int i = 0; i < nrInstructions; i++)
2275 {
2276 DISCPUSTATE cpu;
2277
2278 CPUMR3DisasmInstrCPU(pVM, pCtx, pc, &cpu, prefix);
2279 pc += cpu.opsize;
2280 }
2281}
2282
2283#endif /* DEBUG */
2284
2285#ifdef DEBUG
2286/**
2287 * Debug helper - Saves guest context on raw mode entry (for fatal dump)
2288 *
2289 * @internal
2290 */
2291CPUMR3DECL(void) CPUMR3SaveEntryCtx(PVM pVM)
2292{
2293 pVM->cpum.s.GuestEntry = pVM->cpum.s.Guest;
2294}
2295#endif /* DEBUG */
2296
2297
2298/**
2299 * API for controlling a few of the CPU features found in CR4.
2300 *
2301 * Currently only X86_CR4_TSD is accepted as input.
2302 *
2303 * @returns VBox status code.
2304 *
2305 * @param pVM The VM handle.
2306 * @param fOr The CR4 OR mask.
2307 * @param fAnd The CR4 AND mask.
2308 */
2309CPUMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd)
2310{
2311 AssertMsgReturn(!(fOr & ~(X86_CR4_TSD)), ("%#x\n", fOr), VERR_INVALID_PARAMETER);
2312 AssertMsgReturn((fAnd & ~(X86_CR4_TSD)) == ~(X86_CR4_TSD), ("%#x\n", fAnd), VERR_INVALID_PARAMETER);
2313
2314 pVM->cpum.s.CR4.OrMask &= fAnd;
2315 pVM->cpum.s.CR4.OrMask |= fOr;
2316
2317 return VINF_SUCCESS;
2318}
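/* Illustrative use (hypothetical call sites; the actual callers live outside this
 * file): a component that wants guest RDTSC to trap could force CR4.TSD and later
 * release it again with
 *     CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~(X86_CR4_TSD));
 *     CPUMR3SetCR4Feature(pVM, 0,           ~(X86_CR4_TSD));
 * Both calls satisfy the asserts above since only the TSD bit is involved. */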
2319