/* $Id: CPUMR0.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
/** @file
 * CPUM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/cpum.h>
#include "CPUMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/hwaccm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>



/**
 * Does Ring-0 CPUM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VBox.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR0DECL(int) CPUMR0Init(PVM pVM)
{
    LogFlow(("CPUMR0Init: %p\n", pVM));

    /*
     * Check CR0 & CR4 flags.
     */
    uint32_t u32CR0 = ASMGetCR0();
    if ((u32CR0 & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) /* a bit paranoid perhaps.. */
    {
        Log(("CPUMR0Init: PE or PG not set. cr0=%#x\n", u32CR0));
        return VERR_UNSUPPORTED_CPU_MODE;
    }

    /*
     * Check for sysenter and syscall usage.
     */
    if (ASMHasCpuId())
    {
        /*
         * SYSENTER/SYSEXIT
         *
         * Intel docs claim you should test both the flag and family, model &
         * stepping because some Pentium Pro CPUs have the SEP cpuid flag set,
         * but don't support it.  AMD CPUs may support this feature in legacy
         * mode, but they've banned it from long mode.  Since we switch to
         * 32-bit mode when entering raw-mode context the feature would become
         * accessible again on AMD CPUs, so we have to check regardless of
         * host bitness.
         */
        uint32_t u32CpuVersion;
        uint32_t u32Dummy;
        uint32_t fFeatures;
        ASMCpuId(1, &u32CpuVersion, &u32Dummy, &u32Dummy, &fFeatures);
        uint32_t u32Family   = u32CpuVersion >> 8;
        uint32_t u32Model    = (u32CpuVersion >> 4) & 0xF;
        uint32_t u32Stepping = u32CpuVersion & 0xF;
        if (    (fFeatures & X86_CPUID_FEATURE_EDX_SEP)
            &&  (   u32Family   != 6    /* (> pentium pro) */
                 || u32Model    >= 3
                 || u32Stepping >= 3
                 || !ASMIsIntelCpu())
           )
        {
            /*
             * Read the MSR and see if it's in use or not.
             */
            uint32_t u32 = ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
            if (u32)
            {
                pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSENTER;
                Log(("CPUMR0Init: host uses sysenter cs=%08x%08x\n", ASMRdMsr_High(MSR_IA32_SYSENTER_CS), u32));
            }
        }

        /*
         * SYSCALL/SYSRET
         *
         * This feature is indicated by the SEP bit returned in EDX by CPUID
         * function 0x80000001.  Intel CPUs only support this feature in
         * long mode.  Since we're not running 64-bit guests in raw-mode there
         * are no issues with 32-bit Intel hosts.
         */
        uint32_t cExt = 0;
        ASMCpuId(0x80000000, &cExt, &u32Dummy, &u32Dummy, &u32Dummy);
        if (    cExt >= 0x80000001
            &&  cExt <= 0x8000ffff)
        {
            uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
            if (fExtFeaturesEDX & X86_CPUID_AMD_FEATURE_EDX_SEP)
            {
#ifdef RT_ARCH_X86
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
                if (fExtFeaturesEDX & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
# else
                if (!ASMIsIntelCpu())
# endif
#endif
                {
                    uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
                    if (fEfer & MSR_K6_EFER_SCE)
                    {
                        pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSCALL;
                        Log(("CPUMR0Init: host uses syscall\n"));
                    }
                }
            }
        }
    }


    /*
     * Check if debug registers are armed.
     * This ASSUMES that DR7.GD is not set, or that it's handled transparently!
     */
    uint32_t u32DR7 = ASMGetDR7();
    if (u32DR7 & X86_DR7_ENABLED_MASK)
    {
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
            pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
        Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
    }

    return VINF_SUCCESS;
}
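
/*
 * Illustrative sketch (not part of the original sources): the family/model/
 * stepping decoding done in CPUMR0Init() above, applied to a hypothetical
 * CPUID leaf 1 EAX value.  The sample value 0x00000633 and the helper name
 * are made up purely to make the shift/mask arithmetic concrete; the real
 * code reads the registers via ASMCpuId() on the running host.
 */
#if 0 /* example only */
#include <stdint.h>
#include <stdio.h>

static void cpumR0ExampleDecodeCpuVersion(uint32_t uVersionEax)
{
    uint32_t const uFamily   = uVersionEax >> 8;          /* same shift as the code above */
    uint32_t const uModel    = (uVersionEax >> 4) & 0xF;  /* bits 4..7 */
    uint32_t const uStepping = uVersionEax & 0xF;         /* bits 0..3 */
    printf("family=%u model=%u stepping=%u\n", (unsigned)uFamily, (unsigned)uModel, (unsigned)uStepping);
}

int main(void)
{
    cpumR0ExampleDecodeCpuVersion(0x00000633); /* hypothetical P6-class signature: family 6, model 3, stepping 3 */
    return 0;
}
#endif
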


/**
 * Lazily sync in the FPU/XMM state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context.
 */
VMMR0DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
    Assert(ASMGetCR4() & X86_CR4_OSFSXR);

    /* If the FPU state has already been loaded, then it's a guest trap. */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU)
    {
        Assert(    ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
               ||  ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * There are two basic actions:
     *   1. Save host fpu and restore guest fpu.
     *   2. Generate guest trap.
     *
     * When entering the hypervisor we'll always enable MP (for proper wait
     * trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
     * is taken from the guest OS in order to get proper SSE handling.
     *
     *
     * Actions taken depending on the guest CR0 flags:
     *
     *   3    2    1
     *  TS | EM | MP  |  FPUInstr  | WAIT  :: VMM Action
     * ------------------------------------------------------------------------
     *   0 |  0 |  0  |  Exec      | Exec  :: Clear TS & MP, Save HC, Load GC.
     *   0 |  0 |  1  |  Exec      | Exec  :: Clear TS, Save HC, Load GC.
     *   0 |  1 |  0  |  #NM       | Exec  :: Clear TS & MP, Save HC, Load GC.
     *   0 |  1 |  1  |  #NM       | Exec  :: Clear TS, Save HC, Load GC.
     *   1 |  0 |  0  |  #NM       | Exec  :: Clear MP, Save HC, Load GC. (EM is already cleared.)
     *   1 |  0 |  1  |  #NM       | #NM   :: Go to guest taking trap there.
     *   1 |  1 |  0  |  #NM       | Exec  :: Clear MP, Save HC, Load GC. (EM is already set.)
     *   1 |  1 |  1  |  #NM       | #NM   :: Go to guest taking trap there.
     */

    switch (pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
    {
        case X86_CR0_MP | X86_CR0_TS:
        case X86_CR0_MP | X86_CR0_EM | X86_CR0_TS:
            return VINF_EM_RAW_GUEST_TRAP;
        default:
            break;
    }

#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));

        /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
        cpumR0SaveHostFPUState(&pVCpu->cpum.s);

        /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
        pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
    }
    else
#endif
    {
#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
# if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_KERNEL_USING_XMM) /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 3.0!! */
        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE));
        /** @todo Move the FFXSR handling down into
         *        cpumR0SaveHostRestoreGuestFPUState to optimize the
         *        VBOX_WITH_KERNEL_USING_XMM handling. */
        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        uint64_t SavedEFER = 0;
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
        {
            SavedEFER = ASMRdMsr(MSR_K6_EFER);
            if (SavedEFER & MSR_K6_EFER_FFXSR)
            {
                ASMWrMsr(MSR_K6_EFER, SavedEFER & ~MSR_K6_EFER_FFXSR);
                pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
            }
        }

        /* Do the job and record that we've switched FPU state. */
        cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);

        /* Restore EFER. */
        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
            ASMWrMsr(MSR_K6_EFER, SavedEFER);

# else
        uint64_t oldMsrEFERHost = 0;
        uint32_t oldCR0 = ASMGetCR0();

        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
        {
            /** @todo Do we really need to read this every time?? The host could change this on the fly though.
             *  bird: what about starting by skipping the ASMWrMsr below if we didn't
             *        change anything? Ditto for the stuff in CPUMR0SaveGuestFPU. */
            oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
            if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
            {
                ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
                pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
            }
        }

        /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
        int rc = CPUMHandleLazyFPU(pVCpu);
        AssertRC(rc);
        Assert(CPUMIsGuestFPUStateActive(pVCpu));

        /* Restore EFER MSR. */
        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost);

        /* CPUMHandleLazyFPU could have changed CR0; restore it. */
        ASMSetCR0(oldCR0);
# endif

#else  /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */

        /*
         * Save the FPU control word and MXCSR, so we can restore the state properly afterwards.
         * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
         */
        pVCpu->cpum.s.Host.fpu.FCW = CPUMGetFCW();
        if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
            pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();

        cpumR0LoadFPU(pCtx);

        /*
         * The MSR_K6_EFER_FFXSR feature is AMD only so far, but check the cpuid just in case Intel adds it in the future.
         *
         * MSR_K6_EFER_FFXSR changes the behaviour of fxsave and fxrstor: the XMM state isn't saved/restored.
         */
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
        {
            /** @todo Do we really need to read this every time?? The host could change this on the fly though. */
            uint64_t msrEFERHost = ASMRdMsr(MSR_K6_EFER);

            if (msrEFERHost & MSR_K6_EFER_FFXSR)
            {
                /* fxrstor doesn't restore the XMM state! */
                cpumR0LoadXMM(pCtx);
                pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
            }
        }

#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
    }

    Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
    return VINF_SUCCESS;
}
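
/*
 * Illustrative sketch (not part of the original sources): the guest CR0
 * decision encoded by the action table and switch statement in
 * CPUMR0LoadGuestFPU() above.  Only the MP+TS and MP+EM+TS combinations make
 * the VMM forward the #NM to the guest; every other combination performs the
 * lazy host-save / guest-load of the FPU state.  The helper name and the
 * EX_CR0_* macros are made up for this example; they mirror the X86_CR0_*
 * constants used by the real code.
 */
#if 0 /* example only */
#include <stdbool.h>
#include <stdint.h>

#define EX_CR0_MP  0x00000002U  /* monitor coprocessor */
#define EX_CR0_EM  0x00000004U  /* emulate FPU         */
#define EX_CR0_TS  0x00000008U  /* task switched       */

/* Returns true when a #NM should be reflected to the guest instead of
   switching the FPU state lazily. */
static bool exampleShouldForwardNmToGuest(uint32_t uGuestCr0)
{
    switch (uGuestCr0 & (EX_CR0_MP | EX_CR0_EM | EX_CR0_TS))
    {
        case EX_CR0_MP | EX_CR0_TS:
        case EX_CR0_MP | EX_CR0_EM | EX_CR0_TS:
            return true;    /* rows "1|0|1" and "1|1|1" of the table above */
        default:
            return false;   /* all other rows: save HC state, load GC state */
    }
}
#endif
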


/**
 * Save guest FPU/XMM state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context.
 */
VMMR0DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
    Assert(ASMGetCR4() & X86_CR4_OSFSXR);
    AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);

#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
        {
            HWACCMR0SaveFPUState(pVM, pVCpu, pCtx);
            cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
        }
        /* else: nothing to do; we didn't perform a world switch. */
    }
    else
#endif
    {
#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
# ifdef VBOX_WITH_KERNEL_USING_XMM
        /*
         * We've already saved the XMM registers in the assembly wrapper, so
         * we have to save them before saving the entire FPU state and put them
         * back afterwards.
         */
        /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
         *        I'm not able to test such an optimization tonight.
         *        We could just do all this in assembly. */
        uint128_t aGuestXmmRegs[16];
        memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
# endif

        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        uint64_t oldMsrEFERHost = 0;
        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
        {
            oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
        }
        cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);

        /* Restore EFER MSR. */
        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);

# ifdef VBOX_WITH_KERNEL_USING_XMM
        memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
# endif

#else  /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
# ifdef VBOX_WITH_KERNEL_USING_XMM
#  error "Fix all the NM_TRAPS_IN_KERNEL_MODE code path. I'm not going to fix unused code now."
# endif
        cpumR0SaveFPU(pCtx);
        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
        {
            /* fxsave doesn't save the XMM state! */
            cpumR0SaveXMM(pCtx);
        }

        /*
         * Restore the original FPU control word and MXCSR.
         * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
         */
        cpumR0SetFCW(pVCpu->cpum.s.Host.fpu.FCW);
        if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
            cpumR0SetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
    }

    pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_MANUAL_XMM_RESTORE);
    return VINF_SUCCESS;
}
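
/*
 * Illustrative sketch (not part of the original sources): the EFER.FFXSR
 * dance used by both CPUMR0LoadGuestFPU() and CPUMR0SaveGuestFPU() above.
 * When the AMD "fast FXSAVE/FXRSTOR" bit is set, fxsave/fxrstor skip the XMM
 * registers, so the code either clears the bit for the duration of the state
 * switch or saves/loads the XMM registers manually.  The helper names and the
 * fake MSR variable are placeholders for this example only; the real code
 * uses ASMRdMsr/ASMWrMsr on MSR_K6_EFER.
 */
#if 0 /* example only */
#include <stdint.h>

#define EX_EFER_FFXSR   (UINT64_C(1) << 14)     /* EFER.FFXSR: fast FXSAVE/FXRSTOR */

static uint64_t g_uFakeEfer = EX_EFER_FFXSR;    /* stand-in for rdmsr/wrmsr on MSR_K6_EFER */
static uint64_t exampleRdEfer(void)             { return g_uFakeEfer; }
static void     exampleWrEfer(uint64_t uValue)  { g_uFakeEfer = uValue; }

/* Runs pfnSwitchFpuState with EFER.FFXSR temporarily cleared so that the
   fxsave/fxrstor it performs also covers the XMM registers. */
static void exampleWithFfxsrCleared(void (*pfnSwitchFpuState)(void))
{
    uint64_t const uSavedEfer = exampleRdEfer();
    if (uSavedEfer & EX_EFER_FFXSR)
        exampleWrEfer(uSavedEfer & ~EX_EFER_FFXSR);

    pfnSwitchFpuState();                        /* e.g. save host FPU, load guest FPU/XMM */

    if (uSavedEfer & EX_EFER_FFXSR)
        exampleWrEfer(uSavedEfer);              /* put the original EFER value back */
}
#endif
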


/**
 * Save guest debug state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context.
 * @param   fDR6        Include DR6 or not.
 */
VMMR0DECL(int) CPUMR0SaveGuestDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
{
    Assert(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS);

    /* Save the guest's debug state. The caller is responsible for DR7. */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_STATE))
        {
            uint64_t dr6 = pCtx->dr[6];

            HWACCMR0SaveDebugState(pVM, pVCpu, pCtx);
            if (!fDR6) /* dr6 was already up-to-date */
                pCtx->dr[6] = dr6;
        }
    }
    else
#endif
    {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cpumR0SaveDRx(&pCtx->dr[0]);
#else
        pCtx->dr[0] = ASMGetDR0();
        pCtx->dr[1] = ASMGetDR1();
        pCtx->dr[2] = ASMGetDR2();
        pCtx->dr[3] = ASMGetDR3();
#endif
        if (fDR6)
            pCtx->dr[6] = ASMGetDR6();
    }

    /*
     * Restore the host's debug state. DR0-3, DR6 and only then DR7!
     * DR7 contains 0x400 right now.
     */
    CPUMR0LoadHostDebugState(pVM, pVCpu);
    Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS));
    return VINF_SUCCESS;
}


/**
 * Lazily sync in the debug state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context.
 * @param   fDR6        Include DR6 or not.
 */
VMMR0DECL(int) CPUMR0LoadGuestDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
{
    /* Save the host state. */
    CPUMR0SaveHostDebugState(pVM, pVCpu);
    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);

    /* Activate the guest state DR0-3; DR7 is left to the caller. */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        /* Restore the state on entry as we need to be in 64 bits mode to access the full state. */
        pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_STATE;
    }
    else
#endif
    {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cpumR0LoadDRx(&pCtx->dr[0]);
#else
        ASMSetDR0(pCtx->dr[0]);
        ASMSetDR1(pCtx->dr[1]);
        ASMSetDR2(pCtx->dr[2]);
        ASMSetDR3(pCtx->dr[3]);
#endif
        if (fDR6)
            ASMSetDR6(pCtx->dr[6]);
    }

    pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
    return VINF_SUCCESS;
}

/**
 * Save the host debug state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 */
VMMR0DECL(int) CPUMR0SaveHostDebugState(PVM pVM, PVMCPU pVCpu)
{
    /* Save the host state. */
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
    cpumR0SaveDRx(&pVCpu->cpum.s.Host.dr0);
#else
    pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
    pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
    pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
    pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
#endif
    pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
    /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
    pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
    /* Make sure DR7 is harmless or else we could trigger breakpoints when restoring dr0-3 (!) */
    ASMSetDR7(X86_DR7_INIT_VAL);

    return VINF_SUCCESS;
}

/**
 * Load the host debug state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 */
VMMR0DECL(int) CPUMR0LoadHostDebugState(PVM pVM, PVMCPU pVCpu)
{
    Assert(pVCpu->cpum.s.fUseFlags & (CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HYPER));

    /*
     * Restore the host's debug state. DR0-3, DR6 and only then DR7!
     * DR7 contains 0x400 right now.
     */
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
    cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
#else
    ASMSetDR0(pVCpu->cpum.s.Host.dr0);
    ASMSetDR1(pVCpu->cpum.s.Host.dr1);
    ASMSetDR2(pVCpu->cpum.s.Host.dr2);
    ASMSetDR3(pVCpu->cpum.s.Host.dr3);
#endif
    ASMSetDR6(pVCpu->cpum.s.Host.dr6);
    ASMSetDR7(pVCpu->cpum.s.Host.dr7);

    pVCpu->cpum.s.fUseFlags &= ~(CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HYPER);
    return VINF_SUCCESS;
}
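
/*
 * Illustrative sketch (not part of the original sources): the DR7 discipline
 * used by the debug-register routines above.  DR7 is parked at the
 * architectural init value 0x400 before DR0-3 are touched, and a new DR7 is
 * only written once DR0-3 and DR6 hold consistent values, so no breakpoint
 * can fire on stale addresses.  The struct, helper names and the fake
 * register array are placeholders for this example; the real code uses
 * ASMGetDRx/ASMSetDRx and X86_DR7_INIT_VAL.
 */
#if 0 /* example only */
#include <stdint.h>

#define EX_DR7_INIT_VAL 0x400U                  /* all breakpoints disabled; bit 10 is reserved-set */

typedef struct EXDBGSTATE
{
    uint64_t dr0, dr1, dr2, dr3, dr6, dr7;
} EXDBGSTATE;

static uint64_t g_aFakeDRx[8];                  /* stand-ins for the real DR0-7 (indices 4 and 5 unused) */
static void exampleSetDRx(unsigned iReg, uint64_t uValue) { g_aFakeDRx[iReg] = uValue; }

static void exampleLoadDebugState(const EXDBGSTATE *pState)
{
    exampleSetDRx(7, EX_DR7_INIT_VAL);          /* park DR7 at a harmless value first */
    exampleSetDRx(0, pState->dr0);
    exampleSetDRx(1, pState->dr1);
    exampleSetDRx(2, pState->dr2);
    exampleSetDRx(3, pState->dr3);
    exampleSetDRx(6, pState->dr6);
    exampleSetDRx(7, pState->dr7);              /* only now arm the breakpoints again */
}
#endif
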


/**
 * Lazily sync in the hypervisor debug state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context.
 * @param   fDR6        Include DR6 or not.
 */
VMMR0DECL(int) CPUMR0LoadHyperDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
{
    /* Save the host state. */
    CPUMR0SaveHostDebugState(pVM, pVCpu);
    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);

    /* Activate the hypervisor state DR0-3; DR7 is left to the caller. */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        AssertFailed();
        return VERR_NOT_IMPLEMENTED;
    }
    else
#endif
    {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        AssertFailed();
        return VERR_NOT_IMPLEMENTED;
#else
        ASMSetDR0(CPUMGetHyperDR0(pVCpu));
        ASMSetDR1(CPUMGetHyperDR1(pVCpu));
        ASMSetDR2(CPUMGetHyperDR2(pVCpu));
        ASMSetDR3(CPUMGetHyperDR3(pVCpu));
#endif
        if (fDR6)
            ASMSetDR6(CPUMGetHyperDR6(pVCpu));
    }

    pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
    return VINF_SUCCESS;
}