VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@3486

Last change on this file since 3486 was 3301, checked in by vboxsync, 17 years ago

Update the MSR_IA32_FEATURE_CONTROL MSR ourselves if it's not locked.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 19.0 KB
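The change described above is the standard VT-x enabling dance: read MSR_IA32_FEATURE_CONTROL, and if the BIOS left it unlocked, set the VMXON-enable and lock bits ourselves; once the lock bit is set the MSR can no longer be modified. A minimal sketch of the pattern, reusing this file's ASMRdMsr/ASMWrMsr helpers and bit names (an illustration of the idea, not the committed diff):

    uint64_t fc = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    if (!(fc & MSR_IA32_FEATURE_CONTROL_LOCK))
    {
        /* The BIOS left the MSR unlocked: enable VMXON and lock it in ourselves. */
        fc |= MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK;
        ASMWrMsr(MSR_IA32_FEATURE_CONTROL, fc);
    }
    /* If LOCK is set but VMXON is not, the BIOS disabled VT-x and it cannot be re-enabled here. */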
/* $Id: HWACCMR0.cpp 3301 2007-06-26 15:35:26Z vboxsync $ */
/** @file
 * HWACCM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include "HWVMXR0.h"
#include "HWSVMR0.h"

/**
 * Does Ring-0 HWACCM initialization.
 *
 * This is mainly to check that the host CPU mode is compatible
 * with VMX or SVM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Init(PVM pVM)
{
    LogComFlow(("HWACCMR0Init: %p\n", pVM));

    pVM->hwaccm.s.vmx.fSupported = false;
    pVM->hwaccm.s.svm.fSupported = false;

#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */

    pVM->hwaccm.s.fHWACCMR0Init = true;
    pVM->hwaccm.s.ulLastError   = VINF_SUCCESS;

    /*
     * Check for VMX capabilities
     */
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX;
        uint32_t u32Dummy;
        uint32_t u32FeaturesEDX;
        uint32_t u32Vendor1, u32Vendor2, u32Vendor3;

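        /* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order ("Genu", "ineI", "ntel"),
           which is why ECX is stored into u32Vendor3 below. */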
        ASMCpuId(0, &u32Dummy, &u32Vendor1, &u32Vendor3, &u32Vendor2);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &pVM->hwaccm.s.cpuid.u32AMDFeatureECX, &pVM->hwaccm.s.cpuid.u32AMDFeatureEDX);

        if (    u32Vendor1 == 0x756e6547 /* Genu */
            &&  u32Vendor2 == 0x49656e69 /* ineI */
            &&  u32Vendor3 == 0x6c65746e /* ntel */
           )
        {
            /*
             * Read all VMX MSRs if VMX is available (this also requires RDMSR/WRMSR support,
             * hence the MSR feature check). We also assume all VMX-enabled CPUs support
             * fxsave/fxrstor.
             */
            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                pVM->hwaccm.s.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
                /*
                 * Both the LOCK and VMXON bits must be set; otherwise VMXON will generate a #GP.
                 * Once the lock bit is set, this MSR can no longer be modified.
                 */
                if (!(pVM->hwaccm.s.vmx.msr.feature_ctrl & MSR_IA32_FEATURE_CONTROL_LOCK))
                {
                    /* MSR is not yet locked; we can set VMXON and the lock bit ourselves here. */
                    pVM->hwaccm.s.vmx.msr.feature_ctrl |= (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK);
                    ASMWrMsr(MSR_IA32_FEATURE_CONTROL, pVM->hwaccm.s.vmx.msr.feature_ctrl);
                }

                if (    (pVM->hwaccm.s.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                    ==  (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                {
                    pVM->hwaccm.s.vmx.fSupported = true;
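                    /* Cache the VMX capability MSRs; they describe which pin-based, processor-based,
                       entry and exit control settings the CPU allows. */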
                    pVM->hwaccm.s.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_exit        = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_entry       = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
                    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);

                    /*
                     * Check CR4.VMXE
                     */
                    pVM->hwaccm.s.vmx.hostCR4 = ASMGetCR4();
                    if (!(pVM->hwaccm.s.vmx.hostCR4 & X86_CR4_VMXE))
                    {
                        /* In theory this bit could be cleared behind our back, which would cause
                         * #UD faults when we try to execute the VMX instructions...
                         */
                        ASMSetCR4(pVM->hwaccm.s.vmx.hostCR4 | X86_CR4_VMXE);
                    }
                }
                else
                    pVM->hwaccm.s.ulLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
            }
            else
                pVM->hwaccm.s.ulLastError = VERR_VMX_NO_VMX;
        }
        else
        if (    u32Vendor1 == 0x68747541 /* Auth */
            &&  u32Vendor2 == 0x69746e65 /* enti */
            &&  u32Vendor3 == 0x444d4163 /* cAMD */
           )
        {
            /*
             * Read all SVM MSRs if SVM is available (this also requires RDMSR/WRMSR support).
             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
             */
            if (    (pVM->hwaccm.s.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                uint64_t val;

                /* Turn on SVM in the EFER MSR. */
                val = ASMRdMsr(MSR_K6_EFER);
                if (!(val & MSR_K6_EFER_SVME))
                {
                    ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
                }
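                /* Once EFER.SVME is set, the SVM instructions (VMRUN, VMLOAD, VMSAVE, etc.) no longer raise #UD. */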
                /* Paranoia. */
                val = ASMRdMsr(MSR_K6_EFER);
                if (val & MSR_K6_EFER_SVME)
                {
                    /* Query AMD features. */
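                    /* CPUID 0x8000000A: EAX = SVM revision, EBX = number of address space IDs (ASIDs). */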
                    ASMCpuId(0x8000000A, &pVM->hwaccm.s.svm.u32Rev, &pVM->hwaccm.s.svm.u32MaxASID, &u32Dummy, &u32Dummy);

                    pVM->hwaccm.s.svm.fSupported = true;
                }
                else
                {
                    pVM->hwaccm.s.ulLastError = VERR_SVM_ILLEGAL_EFER_MSR;
                    AssertFailed();
                }
            }
            else
                pVM->hwaccm.s.ulLastError = VERR_SVM_NO_SVM;
        }
        else
            pVM->hwaccm.s.ulLastError = VERR_HWACCM_UNKNOWN_CPU;
    }
    else
        pVM->hwaccm.s.ulLastError = VERR_HWACCM_NO_CPUID;

#endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */

    return VINF_SUCCESS;
}


/**
 * Sets up and activates VMX or SVM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0SetupVMX(PVM pVM)
{
    int rc = VINF_SUCCESS;

    if (pVM == NULL)
        return VERR_INVALID_PARAMETER;

    /* Setup Intel VMX or AMD SVM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0Setup(pVM);
    else
        rc = SVMR0Setup(pVM);

    return rc;
}


/**
 * Enable VMX or SVM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Enable(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Always load the guest's FPU/XMM state on-demand. */
    CPUMDeactivateGuestFPUState(pVM);

    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        rc  = VMXR0Enable(pVM);
        AssertRC(rc);
        rc |= VMXR0SaveHostState(pVM);
        AssertRC(rc);
        rc |= VMXR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        rc  = SVMR0Enable(pVM);
        AssertRC(rc);
        rc |= SVMR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    return VINF_SUCCESS;
}


/**
 * Disable VMX or SVM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Disable(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /** @note It's rather tricky with longjmps done by e.g. Log statements or the page fault handler. */
    /* We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
     * or trash somebody else's FPU state.
     */

    /* Restore host FPU and XMM state if necessary. */
    if (CPUMIsGuestFPUStateActive(pVM))
    {
        Log2(("CPUMRestoreHostFPUState\n"));
        /** @note CPUMRestoreHostFPUState keeps the current CR0 intact. */
        CPUMRestoreHostFPUState(pVM);

        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    }

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0Disable(pVM);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0Disable(pVM);
    }
}

/**
 * Runs guest code in a hardware accelerated VM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0RunGuestCode(pVM, pCtx);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0RunGuestCode(pVM, pCtx);
    }
}


#ifdef VBOX_STRICT
#include <iprt/string.h>
/**
 * Dumps a descriptor.
 *
 * @param   Desc    Descriptor to dump.
 * @param   Sel     Selector number.
 * @param   pszMsg  Message to prepend the log entry with.
 */
HWACCMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC Desc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const aTypes[32] =
    {
#define STRENTRY(str) { sizeof(str) - 1, str }

        /* system */
#if HC_ARCH_BITS == 64
        STRENTRY("Reserved0 "),                 /* 0x00 */
        STRENTRY("Reserved1 "),                 /* 0x01 */
        STRENTRY("LDT "),                       /* 0x02 */
        STRENTRY("Reserved3 "),                 /* 0x03 */
        STRENTRY("Reserved4 "),                 /* 0x04 */
        STRENTRY("Reserved5 "),                 /* 0x05 */
        STRENTRY("Reserved6 "),                 /* 0x06 */
        STRENTRY("Reserved7 "),                 /* 0x07 */
        STRENTRY("Reserved8 "),                 /* 0x08 */
        STRENTRY("TSS64Avail "),                /* 0x09 */
        STRENTRY("ReservedA "),                 /* 0x0a */
        STRENTRY("TSS64Busy "),                 /* 0x0b */
        STRENTRY("Call64 "),                    /* 0x0c */
        STRENTRY("ReservedD "),                 /* 0x0d */
        STRENTRY("Int64 "),                     /* 0x0e */
        STRENTRY("Trap64 "),                    /* 0x0f */
#else
        STRENTRY("Reserved0 "),                 /* 0x00 */
        STRENTRY("TSS16Avail "),                /* 0x01 */
        STRENTRY("LDT "),                       /* 0x02 */
        STRENTRY("TSS16Busy "),                 /* 0x03 */
        STRENTRY("Call16 "),                    /* 0x04 */
        STRENTRY("Task "),                      /* 0x05 */
        STRENTRY("Int16 "),                     /* 0x06 */
        STRENTRY("Trap16 "),                    /* 0x07 */
        STRENTRY("Reserved8 "),                 /* 0x08 */
        STRENTRY("TSS32Avail "),                /* 0x09 */
        STRENTRY("ReservedA "),                 /* 0x0a */
        STRENTRY("TSS32Busy "),                 /* 0x0b */
        STRENTRY("Call32 "),                    /* 0x0c */
        STRENTRY("ReservedD "),                 /* 0x0d */
        STRENTRY("Int32 "),                     /* 0x0e */
        STRENTRY("Trap32 "),                    /* 0x0f */
#endif
        /* non system */
        STRENTRY("DataRO "),                    /* 0x10 */
        STRENTRY("DataRO Accessed "),           /* 0x11 */
        STRENTRY("DataRW "),                    /* 0x12 */
        STRENTRY("DataRW Accessed "),           /* 0x13 */
        STRENTRY("DataDownRO "),                /* 0x14 */
        STRENTRY("DataDownRO Accessed "),       /* 0x15 */
        STRENTRY("DataDownRW "),                /* 0x16 */
        STRENTRY("DataDownRW Accessed "),       /* 0x17 */
        STRENTRY("CodeEO "),                    /* 0x18 */
        STRENTRY("CodeEO Accessed "),           /* 0x19 */
        STRENTRY("CodeER "),                    /* 0x1a */
        STRENTRY("CodeER Accessed "),           /* 0x1b */
        STRENTRY("CodeConfEO "),                /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),       /* 0x1d */
        STRENTRY("CodeConfER "),                /* 0x1e */
        STRENTRY("CodeConfER Accessed ")        /* 0x1f */
#undef STRENTRY
    };
#define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char        szMsg[128];
    char       *psz = &szMsg[0];
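    /* The S bit (u1DescType) and the 4-bit type field together form an index 0..31 into aTypes. */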
    unsigned    i = Desc->Gen.u1DescType << 4 | Desc->Gen.u4Type;
    memcpy(psz, aTypes[i].psz, aTypes[i].cch);
    psz += aTypes[i].cch;

    if (Desc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
#if HC_ARCH_BITS == 64
    if (Desc->Gen.u1Long)
        ADD_STR(psz, "64-bit ");
    else
        ADD_STR(psz, "Comp ");
#else
    if (Desc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (Desc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
#endif
#undef ADD_STR
    *psz = '\0';

    /*
     * Compute the limit and base, then format the output.
     */
    uint32_t    u32Limit = Desc->Gen.u4LimitHigh << 16 | Desc->Gen.u16LimitLow;
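    /* With G=1 the limit is in 4 KiB units: scale it up and fill in the low 12 bits. */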
    if (Desc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;

#if HC_ARCH_BITS == 64
    uint64_t    u64Base = ((uintptr_t)Desc->Gen.u32BaseHigh3 << 32ULL) | Desc->Gen.u8BaseHigh2 << 24ULL | Desc->Gen.u8BaseHigh1 << 16ULL | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %VX64 %VX64 - base=%VX64 limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au64[0], Desc->au64[1], u64Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#else
    uint32_t    u32Base = Desc->Gen.u8BaseHigh2 << 24 | Desc->Gen.u8BaseHigh1 << 16 | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au32[0], Desc->au32[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#endif
}

/**
 * Formats a full register dump.
 *
 * @param   pCtx    The context to format.
 */
HWACCMR0DECL(void) HWACCMDumpRegs(PCPUMCTX pCtx)
{
    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    } aFlags[] =
    {
        { "vip", NULL, X86_EFL_VIP },
        { "vif", NULL, X86_EFL_VIF },
        { "ac",  NULL, X86_EFL_AC },
        { "vm",  NULL, X86_EFL_VM },
        { "rf",  NULL, X86_EFL_RF },
        { "nt",  NULL, X86_EFL_NT },
        { "ov",  "nv", X86_EFL_OF },
        { "dn",  "up", X86_EFL_DF },
        { "ei",  "di", X86_EFL_IF },
        { "tf",  NULL, X86_EFL_TF },
        { "ng",  "pl", X86_EFL_SF },
        { "nz",  "zr", X86_EFL_ZF },
        { "ac",  "na", X86_EFL_AF },
        { "po",  "pe", X86_EFL_PF },
        { "cy",  "nc", X86_EFL_CF },
    };
    char     szEFlags[80];
    char    *psz = szEFlags;
    uint32_t efl = pCtx->eflags.u32;
    for (unsigned i = 0; i < ELEMENTS(aFlags); i++)
    {
        const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
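    /* Overwrite the trailing space with the string terminator. */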
    psz[-1] = '\0';

    /*
     * Format the registers.
     */
    Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
         "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
         "cs={%04x base=%08x limit=%08x flags=%08x} dr0=%08x dr1=%08x\n"
         "ds={%04x base=%08x limit=%08x flags=%08x} dr2=%08x dr3=%08x\n"
         "es={%04x base=%08x limit=%08x flags=%08x} dr4=%08x dr5=%08x\n"
         "fs={%04x base=%08x limit=%08x flags=%08x} dr6=%08x dr7=%08x\n"
         ,
         pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
         pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
         (RTSEL)pCtx->cs, pCtx->csHid.u32Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr0, pCtx->dr1,
         (RTSEL)pCtx->ds, pCtx->dsHid.u32Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr2, pCtx->dr3,
         (RTSEL)pCtx->es, pCtx->esHid.u32Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr4, pCtx->dr5,
         (RTSEL)pCtx->fs, pCtx->fsHid.u32Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr6, pCtx->dr7));

    Log(("gs={%04x base=%08x limit=%08x flags=%08x} cr0=%08x cr2=%08x\n"
         "ss={%04x base=%08x limit=%08x flags=%08x} cr3=%08x cr4=%08x\n"
         "gdtr=%08x:%04x idtr=%08x:%04x eflags=%08x\n"
         "ldtr={%04x base=%08x limit=%08x flags=%08x}\n"
         "tr  ={%04x base=%08x limit=%08x flags=%08x}\n"
         "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
         "FCW=%04x FSW=%04x FTW=%04x\n",
         (RTSEL)pCtx->gs, pCtx->gsHid.u32Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
         (RTSEL)pCtx->ss, pCtx->ssHid.u32Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
         pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
         (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u32Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
         (RTSEL)pCtx->tr, pCtx->trHid.u32Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
         pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW));
}
#endif /* VBOX_STRICT */