VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@3834

Last change on this file since 3834 was 3750, checked in by vboxsync, 18 years ago

Check if SVM is disabled in the BIOS. Otherwise enabling it with wrmsr will cause a #GP.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 19.3 KB
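
The fix being viewed reduces to a small guard pattern. A minimal sketch, using the helper and MSR names from the file below (the early return with VERR_SVM_DISABLED is illustrative; the file itself records the code in ulLastError instead):

    uint64_t val = ASMRdMsr(MSR_K8_VM_CR);
    if (val & MSR_K8_VM_CR_SVM_DISABLE)
        return VERR_SVM_DISABLED;   /* SVM locked off by the BIOS; leave EFER alone */
    /* Safe now; without the guard this wrmsr would raise a #GP. */
    ASMWrMsr(MSR_K6_EFER, ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_SVME);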
/* $Id: HWACCMR0.cpp 3750 2007-07-20 16:38:23Z vboxsync $ */
/** @file
 * HWACCM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include "HWVMXR0.h"
#include "HWSVMR0.h"

/**
 * Does Ring-0 HWACCM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VMX.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Init(PVM pVM)
{
    LogComFlow(("HWACCMR0Init: %p\n", pVM));

    pVM->hwaccm.s.vmx.fSupported = false;
    pVM->hwaccm.s.svm.fSupported = false;

#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */

    pVM->hwaccm.s.fHWACCMR0Init = true;
    pVM->hwaccm.s.ulLastError   = VINF_SUCCESS;

    /*
     * Check for VMX capabilities
     */
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX;
        uint32_t u32Dummy;
        uint32_t u32FeaturesEDX;
        uint32_t u32Vendor1, u32Vendor2, u32Vendor3;

        ASMCpuId(0, &u32Dummy, &u32Vendor1, &u32Vendor3, &u32Vendor2);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &pVM->hwaccm.s.cpuid.u32AMDFeatureECX, &pVM->hwaccm.s.cpuid.u32AMDFeatureEDX);

        if (    u32Vendor1 == 0x756e6547 /* Genu */
            &&  u32Vendor2 == 0x49656e69 /* ineI */
            &&  u32Vendor3 == 0x6c65746e /* ntel */
           )
        {
            /*
             * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
             * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
             */
            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                pVM->hwaccm.s.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
                /*
                 * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
                 * Once the lock bit is set, this MSR can no longer be modified.
                 */
                if (!(pVM->hwaccm.s.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
                {
                    /* MSR is not yet locked; we can change it ourselves here */
                    pVM->hwaccm.s.vmx.msr.feature_ctrl |= (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK);
                    ASMWrMsr(MSR_IA32_FEATURE_CONTROL, pVM->hwaccm.s.vmx.msr.feature_ctrl);
                }

                if (    (pVM->hwaccm.s.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                    ==  (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                {
                    pVM->hwaccm.s.vmx.fSupported = true;
                    pVM->hwaccm.s.vmx.msr.vmx_basic_info = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls   = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls  = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_exit       = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_entry      = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_misc       = ASMRdMsr(MSR_IA32_VMX_MISC);
                    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum  = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);

                    /*
                     * Check CR4.VMXE
                     */
                    pVM->hwaccm.s.vmx.hostCR4 = ASMGetCR4();
                    if (!(pVM->hwaccm.s.vmx.hostCR4 & X86_CR4_VMXE))
                    {
                        /* In theory this bit could be cleared behind our back. Which would cause #UD faults when we
                         * try to execute the VMX instructions...
                         */
                        ASMSetCR4(pVM->hwaccm.s.vmx.hostCR4 | X86_CR4_VMXE);
                    }
                }
                else
                    pVM->hwaccm.s.ulLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
            }
            else
                pVM->hwaccm.s.ulLastError = VERR_VMX_NO_VMX;
        }
        else
        if (    u32Vendor1 == 0x68747541 /* Auth */
            &&  u32Vendor2 == 0x69746e65 /* enti */
            &&  u32Vendor3 == 0x444d4163 /* cAMD */
           )
        {
            /*
             * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
             */
            if (    (pVM->hwaccm.s.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                uint64_t val;

                /* Check if SVM is disabled */
                val = ASMRdMsr(MSR_K8_VM_CR);
                if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
                {
                    /* Turn on SVM in the EFER MSR. */
                    val = ASMRdMsr(MSR_K6_EFER);
                    if (!(val & MSR_K6_EFER_SVME))
                    {
                        ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
                    }
                    /* Paranoia. */
                    val = ASMRdMsr(MSR_K6_EFER);
                    if (val & MSR_K6_EFER_SVME)
                    {
                        /* Query AMD features. */
                        ASMCpuId(0x8000000A, &pVM->hwaccm.s.svm.u32Rev, &pVM->hwaccm.s.svm.u32MaxASID, &u32Dummy, &u32Dummy);

                        pVM->hwaccm.s.svm.fSupported = true;
                    }
                    else
                    {
                        pVM->hwaccm.s.ulLastError = VERR_SVM_ILLEGAL_EFER_MSR;
                        AssertFailed();
                    }
                }
                else
                    pVM->hwaccm.s.ulLastError = VERR_SVM_DISABLED;
            }
            else
                pVM->hwaccm.s.ulLastError = VERR_SVM_NO_SVM;
        }
        else
            pVM->hwaccm.s.ulLastError = VERR_HWACCM_UNKNOWN_CPU;
    }
    else
        pVM->hwaccm.s.ulLastError = VERR_HWACCM_NO_CPUID;

#endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */

    return VINF_SUCCESS;
}
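
/*
 * Illustrative sketch (not part of the original revision; the function name is
 * invented for the example): the VMXON gate from HWACCMR0Init above, in
 * isolation. Both the lock bit and the VMXON-enable bit of
 * MSR_IA32_FEATURE_CONTROL must be set before VMXON executes without a #GP,
 * and once the lock bit is set the MSR is read-only until reset, which is how
 * a BIOS can disable VMX permanently.
 */
#if 0 /* example only */
static bool hwaccmR0ExampleVmxUsable(void)
{
    uint64_t u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    if (!(u64FeatCtrl & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK)))
    {
        /* Still unlocked: opt in and lock it ourselves. */
        u64FeatCtrl |= MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK;
        ASMWrMsr(MSR_IA32_FEATURE_CONTROL, u64FeatCtrl);
    }
    /* Usable only if the enable bit ended up set alongside the lock bit. */
    return (u64FeatCtrl & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
        ==                (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
}
#endif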


/**
 * Sets up and activates VMX or SVM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0SetupVMX(PVM pVM)
{
    int rc = VINF_SUCCESS;

    if (pVM == NULL)
        return VERR_INVALID_PARAMETER;

    /* Set up Intel VMX if the host supports it, AMD SVM otherwise. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0Setup(pVM);
    else
        rc = SVMR0Setup(pVM);

    return rc;
}

/**
 * Enable VMX or SVM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Enable(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Always load the guest's FPU/XMM state on-demand. */
    CPUMDeactivateGuestFPUState(pVM);

    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        rc  = VMXR0Enable(pVM);
        AssertRC(rc);
        rc |= VMXR0SaveHostState(pVM);
        AssertRC(rc);
        rc |= VMXR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        rc  = SVMR0Enable(pVM);
        AssertRC(rc);
        rc |= SVMR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    return VINF_SUCCESS;
}
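
/*
 * A note on the status handling above (added commentary, not in the original
 * file): VBox status codes use 0 for VINF_SUCCESS, so OR-ing successive
 * results stays 0 only while every call succeeds. A combined failure value is
 * not itself a meaningful status code, but each AssertRC() has already logged
 * the exact failing code, and the caller only needs pass/fail here.
 */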


/**
 * Disable VMX or SVM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Disable(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /** @note It's rather tricky with longjmps done by e.g. Log statements or the page fault handler. */

    /* We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
     * or trash somebody else's FPU state.
     */

    /* Restore host FPU and XMM state if necessary. */
    if (CPUMIsGuestFPUStateActive(pVM))
    {
        Log2(("CPUMRestoreHostFPUState\n"));
        /** @note CPUMRestoreHostFPUState keeps the current CR0 intact. */
        CPUMRestoreHostFPUState(pVM);

        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    }

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0Disable(pVM);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0Disable(pVM);
    }
}

/**
 * Runs guest code in a hardware accelerated VM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0RunGuestCode(pVM, pCtx);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0RunGuestCode(pVM, pCtx);
    }
}


#ifdef VBOX_STRICT
#include <iprt/string.h>
/**
 * Dumps a descriptor.
 *
 * @param   Desc    Descriptor to dump.
 * @param   Sel     Selector number.
 * @param   pszMsg  Message to prepend the log entry with.
 */
HWACCMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC Desc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const aTypes[32] =
    {
#define STRENTRY(str) { sizeof(str) - 1, str }

        /* system */
#if HC_ARCH_BITS == 64
        STRENTRY("Reserved0 "),             /* 0x00 */
        STRENTRY("Reserved1 "),             /* 0x01 */
        STRENTRY("LDT "),                   /* 0x02 */
        STRENTRY("Reserved3 "),             /* 0x03 */
        STRENTRY("Reserved4 "),             /* 0x04 */
        STRENTRY("Reserved5 "),             /* 0x05 */
        STRENTRY("Reserved6 "),             /* 0x06 */
        STRENTRY("Reserved7 "),             /* 0x07 */
        STRENTRY("Reserved8 "),             /* 0x08 */
        STRENTRY("TSS64Avail "),            /* 0x09 */
        STRENTRY("ReservedA "),             /* 0x0a */
        STRENTRY("TSS64Busy "),             /* 0x0b */
        STRENTRY("Call64 "),                /* 0x0c */
        STRENTRY("ReservedD "),             /* 0x0d */
        STRENTRY("Int64 "),                 /* 0x0e */
        STRENTRY("Trap64 "),                /* 0x0f */
#else
        STRENTRY("Reserved0 "),             /* 0x00 */
        STRENTRY("TSS16Avail "),            /* 0x01 */
        STRENTRY("LDT "),                   /* 0x02 */
        STRENTRY("TSS16Busy "),             /* 0x03 */
        STRENTRY("Call16 "),                /* 0x04 */
        STRENTRY("Task "),                  /* 0x05 */
        STRENTRY("Int16 "),                 /* 0x06 */
        STRENTRY("Trap16 "),                /* 0x07 */
        STRENTRY("Reserved8 "),             /* 0x08 */
        STRENTRY("TSS32Avail "),            /* 0x09 */
        STRENTRY("ReservedA "),             /* 0x0a */
        STRENTRY("TSS32Busy "),             /* 0x0b */
        STRENTRY("Call32 "),                /* 0x0c */
        STRENTRY("ReservedD "),             /* 0x0d */
        STRENTRY("Int32 "),                 /* 0x0e */
        STRENTRY("Trap32 "),                /* 0x0f */
#endif
        /* non system */
        STRENTRY("DataRO "),                /* 0x10 */
        STRENTRY("DataRO Accessed "),       /* 0x11 */
        STRENTRY("DataRW "),                /* 0x12 */
        STRENTRY("DataRW Accessed "),       /* 0x13 */
        STRENTRY("DataDownRO "),            /* 0x14 */
        STRENTRY("DataDownRO Accessed "),   /* 0x15 */
        STRENTRY("DataDownRW "),            /* 0x16 */
        STRENTRY("DataDownRW Accessed "),   /* 0x17 */
        STRENTRY("CodeEO "),                /* 0x18 */
        STRENTRY("CodeEO Accessed "),       /* 0x19 */
        STRENTRY("CodeER "),                /* 0x1a */
        STRENTRY("CodeER Accessed "),       /* 0x1b */
        STRENTRY("CodeConfEO "),            /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),   /* 0x1d */
        STRENTRY("CodeConfER "),            /* 0x1e */
        STRENTRY("CodeConfER Accessed ")    /* 0x1f */
#undef STRENTRY
    };
#define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char        szMsg[128];
    char       *psz = &szMsg[0];
    unsigned    i = Desc->Gen.u1DescType << 4 | Desc->Gen.u4Type;
    memcpy(psz, aTypes[i].psz, aTypes[i].cch);
    psz += aTypes[i].cch;

    if (Desc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
#if HC_ARCH_BITS == 64
    if (Desc->Gen.u1Long)
        ADD_STR(psz, "64-bit ");
    else
        ADD_STR(psz, "Comp ");
#else
    if (Desc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (Desc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
#endif
#undef ADD_STR
    *psz = '\0';

    /*
     * Limit and Base and format the output.
     */
    uint32_t u32Limit = Desc->Gen.u4LimitHigh << 16 | Desc->Gen.u16LimitLow;
    if (Desc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;

#if HC_ARCH_BITS == 64
    uint64_t u32Base = ((uintptr_t)Desc->Gen.u32BaseHigh3 << 32ULL) | Desc->Gen.u8BaseHigh2 << 24ULL | Desc->Gen.u8BaseHigh1 << 16ULL | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %VX64 %VX64 - base=%VX64 limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au64[0], Desc->au64[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#else
    uint32_t u32Base = Desc->Gen.u8BaseHigh2 << 24 | Desc->Gen.u8BaseHigh1 << 16 | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au32[0], Desc->au32[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#endif
}
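
/*
 * Worked example for the limit scaling above (illustrative, not part of the
 * original file): a flat 4 GiB segment has u16LimitLow=0xffff, u4LimitHigh=0xf
 * and u1Granularity=1. The raw limit is (0xf << 16) | 0xffff = 0x000fffff;
 * shifting by PAGE_SHIFT (12) and OR-ing in PAGE_OFFSET_MASK (0xfff) converts
 * it to byte granularity: (0x000fffff << 12) | 0xfff = 0xffffffff.
 */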

/**
 * Formats a full register dump.
 *
 * @param   pCtx    The context to format.
 */
HWACCMR0DECL(void) HWACCMDumpRegs(PCPUMCTX pCtx)
{
    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    }   aFlags[] =
    {
        { "vip",NULL, X86_EFL_VIP },
        { "vif",NULL, X86_EFL_VIF },
        { "ac", NULL, X86_EFL_AC },
        { "vm", NULL, X86_EFL_VM },
        { "rf", NULL, X86_EFL_RF },
        { "nt", NULL, X86_EFL_NT },
        { "ov", "nv", X86_EFL_OF },
        { "dn", "up", X86_EFL_DF },
        { "ei", "di", X86_EFL_IF },
        { "tf", NULL, X86_EFL_TF },
        { "ng", "pl", X86_EFL_SF },
        { "nz", "zr", X86_EFL_ZF },
        { "ac", "na", X86_EFL_AF },
        { "po", "pe", X86_EFL_PF },
        { "cy", "nc", X86_EFL_CF },
    };
    char szEFlags[80];
    char *psz = szEFlags;
    uint32_t efl = pCtx->eflags.u32;
    for (unsigned i = 0; i < ELEMENTS(aFlags); i++)
    {
        const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
    psz[-1] = '\0';

    /*
     * Format the registers.
     */
    Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
         "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
         "cs={%04x base=%08x limit=%08x flags=%08x} dr0=%08x dr1=%08x\n"
         "ds={%04x base=%08x limit=%08x flags=%08x} dr2=%08x dr3=%08x\n"
         "es={%04x base=%08x limit=%08x flags=%08x} dr4=%08x dr5=%08x\n"
         "fs={%04x base=%08x limit=%08x flags=%08x} dr6=%08x dr7=%08x\n"
         ,
         pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
         pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
         (RTSEL)pCtx->cs, pCtx->csHid.u32Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr0, pCtx->dr1,
         (RTSEL)pCtx->ds, pCtx->dsHid.u32Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr2, pCtx->dr3,
         (RTSEL)pCtx->es, pCtx->esHid.u32Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr4, pCtx->dr5,
         (RTSEL)pCtx->fs, pCtx->fsHid.u32Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr6, pCtx->dr7));

    Log(("gs={%04x base=%08x limit=%08x flags=%08x} cr0=%08x cr2=%08x\n"
         "ss={%04x base=%08x limit=%08x flags=%08x} cr3=%08x cr4=%08x\n"
         "gdtr=%08x:%04x idtr=%08x:%04x eflags=%08x\n"
         "ldtr={%04x base=%08x limit=%08x flags=%08x}\n"
         "tr  ={%04x base=%08x limit=%08x flags=%08x}\n"
         "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
         "FCW=%04x FSW=%04x FTW=%04x\n",
         (RTSEL)pCtx->gs, pCtx->gsHid.u32Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
         (RTSEL)pCtx->ss, pCtx->ssHid.u32Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
         pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
         (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u32Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
         (RTSEL)pCtx->tr, pCtx->trHid.u32Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
         pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW));
}
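
/*
 * Example of the flag string built above (illustrative, not part of the
 * original file): efl=0x00000246 has IF, ZF and PF set, so the loop produces
 * "nv up ei pl zr na pe nc", the same mnemonics classic DOS debug printed.
 */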
#endif /* VBOX_STRICT */