VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@9817

Last change on this file since 9817 was 9817, checked in by vboxsync, 16 years ago

fs & gs base cleanup

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 47.6 KB
/* $Id: CPUMAllRegs.cpp 9817 2008-06-19 11:47:38Z vboxsync $ */
/** @file
 * CPUM - CPU Monitor(/Manager) - Gets and Sets.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/cpum.h>
#include <VBox/patm.h>
#include <VBox/dbgf.h>
#include <VBox/mm.h>
#include "CPUMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/dis.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>



/** Disable stack frame pointer generation here. */
#if defined(_MSC_VER) && !defined(DEBUG)
# pragma optimize("y", off)
#endif


/**
 * Sets or resets an alternative hypervisor context core.
 *
 * This is called when we get a hypervisor trap and need to switch the
 * context core to the trap frame on the stack. It is called again to reset
 * back to the default context core when resuming hypervisor execution.
 *
 * @param   pVM         The VM handle.
 * @param   pCtxCore    Pointer to the alternative context core or NULL
 *                      to go back to the default context core.
 */
CPUMDECL(void) CPUMHyperSetCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    LogFlow(("CPUMHyperSetCtxCore: %p/%p/%p -> %p\n", pVM->cpum.s.pHyperCoreR3, pVM->cpum.s.pHyperCoreR0, pVM->cpum.s.pHyperCoreGC, pCtxCore));
    if (!pCtxCore)
    {
        pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
        pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreGC = (RCPTRTYPE(PCPUMCTXCORE))VM_GUEST_ADDR(pVM, pCtxCore);
    }
    else
    {
        pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreGC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToGC(pVM, pCtxCore);
    }
}
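
/*
 * Illustrative sketch (not part of the original file): one plausible way a
 * hypervisor trap handler could use CPUMHyperSetCtxCore() to swap in the
 * trap frame and restore the default core afterwards. The handler name and
 * the pTrapCore parameter are hypothetical.
 */
#if 0
static void exampleHandleHyperTrap(PVM pVM, PCPUMCTXCORE pTrapCore)
{
    CPUMHyperSetCtxCore(pVM, pTrapCore); /* accessors now read/write the trap frame */
    uint32_t eip = CPUMGetHyperEIP(pVM); /* e.g. inspect the faulting EIP */
    NOREF(eip);
    CPUMHyperSetCtxCore(pVM, NULL);      /* back to the default core before resuming */
}
#endif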


/**
 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
 * This is only for reading in order to save a few calls.
 *
 * @param   pVM     Handle to the virtual machine.
 */
CPUMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore);
}


/**
 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
 *
 * @returns VBox status code.
 * @param   pVM     Handle to the virtual machine.
 * @param   ppCtx   Receives the hyper CPUMCTX pointer when successful.
 *
 * @deprecated This has never given (and never will give) the right picture of the
 *             hypervisor register state. With CPUMHyperSetCtxCore() this is
 *             getting much worse. So, use the individual functions for getting
 *             and esp. setting the hypervisor registers.
 */
CPUMDECL(int) CPUMQueryHyperCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
{
    *ppCtx = &pVM->cpum.s.Hyper;
    return VINF_SUCCESS;
}

CPUMDECL(void) CPUMSetHyperGDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Hyper.gdtr.cbGdt = limit;
    pVM->cpum.s.Hyper.gdtr.pGdt = addr;
    pVM->cpum.s.Hyper.gdtrPadding = 0;
}

CPUMDECL(void) CPUMSetHyperIDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Hyper.idtr.cbIdt = limit;
    pVM->cpum.s.Hyper.idtr.pIdt = addr;
    pVM->cpum.s.Hyper.idtrPadding = 0;
}

CPUMDECL(void) CPUMSetHyperCR3(PVM pVM, uint32_t cr3)
{
    pVM->cpum.s.Hyper.cr3 = cr3;
}

CPUMDECL(void) CPUMSetHyperCS(PVM pVM, RTSEL SelCS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->cs = SelCS;
}

CPUMDECL(void) CPUMSetHyperDS(PVM pVM, RTSEL SelDS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->ds = SelDS;
}

CPUMDECL(void) CPUMSetHyperES(PVM pVM, RTSEL SelES)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->es = SelES;
}

CPUMDECL(void) CPUMSetHyperFS(PVM pVM, RTSEL SelFS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->fs = SelFS;
}

CPUMDECL(void) CPUMSetHyperGS(PVM pVM, RTSEL SelGS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->gs = SelGS;
}

CPUMDECL(void) CPUMSetHyperSS(PVM pVM, RTSEL SelSS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->ss = SelSS;
}

CPUMDECL(void) CPUMSetHyperESP(PVM pVM, uint32_t u32ESP)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->esp = u32ESP;
}

CPUMDECL(int) CPUMSetHyperEFlags(PVM pVM, uint32_t Efl)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->eflags.u32 = Efl;
    return VINF_SUCCESS;
}

CPUMDECL(void) CPUMSetHyperEIP(PVM pVM, uint32_t u32EIP)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->eip = u32EIP;
}

CPUMDECL(void) CPUMSetHyperTR(PVM pVM, RTSEL SelTR)
{
    pVM->cpum.s.Hyper.tr = SelTR;
}

CPUMDECL(void) CPUMSetHyperLDTR(PVM pVM, RTSEL SelLDTR)
{
    pVM->cpum.s.Hyper.ldtr = SelLDTR;
}

CPUMDECL(void) CPUMSetHyperDR0(PVM pVM, RTGCUINTREG uDr0)
{
    pVM->cpum.s.Hyper.dr0 = uDr0;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR1(PVM pVM, RTGCUINTREG uDr1)
{
    pVM->cpum.s.Hyper.dr1 = uDr1;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR2(PVM pVM, RTGCUINTREG uDr2)
{
    pVM->cpum.s.Hyper.dr2 = uDr2;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR3(PVM pVM, RTGCUINTREG uDr3)
{
    pVM->cpum.s.Hyper.dr3 = uDr3;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR6(PVM pVM, RTGCUINTREG uDr6)
{
    pVM->cpum.s.Hyper.dr6 = uDr6;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR7(PVM pVM, RTGCUINTREG uDr7)
{
    pVM->cpum.s.Hyper.dr7 = uDr7;
    /** @todo in GC we must load it! */
}


CPUMDECL(RTSEL) CPUMGetHyperCS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->cs;
}

CPUMDECL(RTSEL) CPUMGetHyperDS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ds;
}

CPUMDECL(RTSEL) CPUMGetHyperES(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->es;
}

CPUMDECL(RTSEL) CPUMGetHyperFS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->fs;
}

CPUMDECL(RTSEL) CPUMGetHyperGS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->gs;
}

CPUMDECL(RTSEL) CPUMGetHyperSS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ss;
}

#if 0 /* these are not correct. */

CPUMDECL(uint32_t) CPUMGetHyperCR0(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr0;
}

CPUMDECL(uint32_t) CPUMGetHyperCR2(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr2;
}

CPUMDECL(uint32_t) CPUMGetHyperCR3(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr3;
}

CPUMDECL(uint32_t) CPUMGetHyperCR4(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr4;
}

#endif /* not correct */

CPUMDECL(uint32_t) CPUMGetHyperEAX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eax;
}

CPUMDECL(uint32_t) CPUMGetHyperEBX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ebx;
}

CPUMDECL(uint32_t) CPUMGetHyperECX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ecx;
}

CPUMDECL(uint32_t) CPUMGetHyperEDX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->edx;
}

CPUMDECL(uint32_t) CPUMGetHyperESI(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->esi;
}

CPUMDECL(uint32_t) CPUMGetHyperEDI(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->edi;
}

CPUMDECL(uint32_t) CPUMGetHyperEBP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ebp;
}

CPUMDECL(uint32_t) CPUMGetHyperESP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->esp;
}

CPUMDECL(uint32_t) CPUMGetHyperEFlags(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eflags.u32;
}

CPUMDECL(uint32_t) CPUMGetHyperEIP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eip;
}

CPUMDECL(uint32_t) CPUMGetHyperIDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Hyper.idtr.cbIdt;
    return pVM->cpum.s.Hyper.idtr.pIdt;
}

CPUMDECL(uint32_t) CPUMGetHyperGDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Hyper.gdtr.cbGdt;
    return pVM->cpum.s.Hyper.gdtr.pGdt;
}

CPUMDECL(RTSEL) CPUMGetHyperLDTR(PVM pVM)
{
    return pVM->cpum.s.Hyper.ldtr;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr0;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr1;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr2;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr3;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr6;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr7;
}


/**
 * Gets the pointer to the internal CPUMCTXCORE structure.
 * This is only for reading in order to save a few calls.
 *
 * @param   pVM     Handle to the virtual machine.
 */
CPUMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVM pVM)
{
    return CPUMCTX2CORE(&pVM->cpum.s.Guest);
}


/**
 * Sets the guest context core registers.
 *
 * @param   pVM         Handle to the virtual machine.
 * @param   pCtxCore    The new context core values.
 */
CPUMDECL(void) CPUMSetGuestCtxCore(PVM pVM, PCCPUMCTXCORE pCtxCore)
{
    /** @todo #1410 requires selectors to be checked. */

    PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVM->cpum.s.Guest);
    *pCtxCoreDst = *pCtxCore;

    /* Mask away invalid parts of the cpu context. */
    if (!CPUMIsGuestInLongMode(pVM))
    {
        uint64_t u64Mask = UINT64_C(0xffffffff);

        pCtxCoreDst->rip &= u64Mask;
        pCtxCoreDst->rax &= u64Mask;
        pCtxCoreDst->rbx &= u64Mask;
        pCtxCoreDst->rcx &= u64Mask;
        pCtxCoreDst->rdx &= u64Mask;
        pCtxCoreDst->rsi &= u64Mask;
        pCtxCoreDst->rdi &= u64Mask;
        pCtxCoreDst->rbp &= u64Mask;
        pCtxCoreDst->rsp &= u64Mask;
        pCtxCoreDst->rflags.u &= u64Mask;

        pCtxCoreDst->r8  = 0;
        pCtxCoreDst->r9  = 0;
        pCtxCoreDst->r10 = 0;
        pCtxCoreDst->r11 = 0;
        pCtxCoreDst->r12 = 0;
        pCtxCoreDst->r13 = 0;
        pCtxCoreDst->r14 = 0;
        pCtxCoreDst->r15 = 0;
    }
}
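
/*
 * Illustrative sketch (not part of the original file): a consequence of the
 * masking above. When the guest is not in long mode, writing a context core
 * keeps only the low 32 bits of the general registers and zeroes r8-r15.
 * The helper name and pNewCore parameter are hypothetical.
 */
#if 0
static void exampleSetCore32(PVM pVM, PCCPUMCTXCORE pNewCore)
{
    CPUMSetGuestCtxCore(pVM, pNewCore);
    /* Outside long mode the upper register halves are masked away, so: */
    Assert(CPUMGetGuestCtxCore(pVM)->rax <= UINT32_MAX);
    Assert(CPUMGetGuestCtxCore(pVM)->r8 == 0);
}
#endif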


/**
 * Queries the pointer to the internal CPUMCTX structure.
 *
 * @returns VBox status code.
 * @param   pVM     Handle to the virtual machine.
 * @param   ppCtx   Receives the CPUMCTX pointer when successful.
 */
CPUMDECL(int) CPUMQueryGuestCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
{
    *ppCtx = &pVM->cpum.s.Guest;
    return VINF_SUCCESS;
}


CPUMDECL(int) CPUMSetGuestGDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Guest.gdtr.cbGdt = limit;
    pVM->cpum.s.Guest.gdtr.pGdt = addr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestIDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Guest.idtr.cbIdt = limit;
    pVM->cpum.s.Guest.idtr.pIdt = addr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestTR(PVM pVM, uint16_t tr)
{
    pVM->cpum.s.Guest.tr = tr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_TR;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestLDTR(PVM pVM, uint16_t ldtr)
{
    pVM->cpum.s.Guest.ldtr = ldtr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
    return VINF_SUCCESS;
}


/**
 * Set the guest CR0.
 *
 * When called in GC, the hyper CR0 may be updated if that is
 * required. The caller only has to take special action if AM,
 * WP, PG or PE changes.
 *
 * @returns VINF_SUCCESS (consider it void).
 * @param   pVM     Pointer to the shared VM structure.
 * @param   cr0     The new CR0 value.
 */
CPUMDECL(int) CPUMSetGuestCR0(PVM pVM, uint64_t cr0)
{
#ifdef IN_GC
    /*
     * Check if we need to change hypervisor CR0 because
     * of math stuff.
     */
    if (    (cr0                   & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
        !=  (pVM->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
    {
        if (!(pVM->cpum.s.fUseFlags & CPUM_USED_FPU))
        {
            /*
             * We haven't saved the host FPU state yet, so TS and MP are both set
             * and EM should be reflecting the guest EM (it always does this).
             */
            if ((cr0 & X86_CR0_EM) != (pVM->cpum.s.Guest.cr0 & X86_CR0_EM))
            {
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVM->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
                HyperCR0 &= ~X86_CR0_EM;
                HyperCR0 |= cr0 & X86_CR0_EM;
                Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
                ASMSetCR0(HyperCR0);
            }
#ifdef VBOX_STRICT
            else
            {
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVM->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
            }
#endif
        }
        else
        {
            /*
             * Already saved the state, so we're just mirroring
             * the guest flags.
             */
            uint32_t HyperCR0 = ASMGetCR0();
            AssertMsg(    (HyperCR0              & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
                      ==  (pVM->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
                      ("%#x %#x\n", HyperCR0, pVM->cpum.s.Guest.cr0));
            HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
            ASMSetCR0(HyperCR0);
        }
    }
#endif

    /*
     * Check for changes causing TLB flushes (for REM).
     * The caller is responsible for calling PGM when appropriate.
     */
    if (    (cr0                   & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
        !=  (pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
        pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR0;

    pVM->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
    return VINF_SUCCESS;
}
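
/*
 * Illustrative sketch (not part of the original file): a caller emulating a
 * guest MOV CR0 write. CPUMSetGuestCR0() flags the TLB-relevant changes for
 * REM, but as documented above the caller still owns the PGM side when AM,
 * WP, PG or PE change. The helper name is hypothetical and the PGMChangeMode
 * call is just one plausible reaction; check the real callers before copying.
 */
#if 0
static int exampleEmulateMovCR0(PVM pVM, uint64_t uNewCr0)
{
    uint64_t const uOldCr0 = CPUMGetGuestCR0(pVM);
    int rc = CPUMSetGuestCR0(pVM, uNewCr0);
    if ((uOldCr0 ^ uNewCr0) & (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP))
        rc = PGMChangeMode(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR4(pVM), CPUMGetGuestEFER(pVM));
    return rc;
}
#endif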

CPUMDECL(int) CPUMSetGuestCR2(PVM pVM, uint64_t cr2)
{
    pVM->cpum.s.Guest.cr2 = cr2;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestCR3(PVM pVM, uint64_t cr3)
{
    pVM->cpum.s.Guest.cr3 = cr3;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR3;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestCR4(PVM pVM, uint64_t cr4)
{
    if (    (cr4                   & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
        !=  (pVM->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
        pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR4;
    if (!CPUMSupportsFXSR(pVM))
        cr4 &= ~X86_CR4_OSFSXR;
    pVM->cpum.s.Guest.cr4 = cr4;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEFlags(PVM pVM, uint32_t eflags)
{
    pVM->cpum.s.Guest.eflags.u32 = eflags;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEIP(PVM pVM, uint32_t eip)
{
    pVM->cpum.s.Guest.eip = eip;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEAX(PVM pVM, uint32_t eax)
{
    pVM->cpum.s.Guest.eax = eax;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEBX(PVM pVM, uint32_t ebx)
{
    pVM->cpum.s.Guest.ebx = ebx;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestECX(PVM pVM, uint32_t ecx)
{
    pVM->cpum.s.Guest.ecx = ecx;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEDX(PVM pVM, uint32_t edx)
{
    pVM->cpum.s.Guest.edx = edx;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestESP(PVM pVM, uint32_t esp)
{
    pVM->cpum.s.Guest.esp = esp;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEBP(PVM pVM, uint32_t ebp)
{
    pVM->cpum.s.Guest.ebp = ebp;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestESI(PVM pVM, uint32_t esi)
{
    pVM->cpum.s.Guest.esi = esi;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEDI(PVM pVM, uint32_t edi)
{
    pVM->cpum.s.Guest.edi = edi;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestSS(PVM pVM, uint16_t ss)
{
    pVM->cpum.s.Guest.ss = ss;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestCS(PVM pVM, uint16_t cs)
{
    pVM->cpum.s.Guest.cs = cs;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestDS(PVM pVM, uint16_t ds)
{
    pVM->cpum.s.Guest.ds = ds;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestES(PVM pVM, uint16_t es)
{
    pVM->cpum.s.Guest.es = es;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestFS(PVM pVM, uint16_t fs)
{
    pVM->cpum.s.Guest.fs = fs;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestGS(PVM pVM, uint16_t gs)
{
    pVM->cpum.s.Guest.gs = gs;
    return VINF_SUCCESS;
}

CPUMDECL(void) CPUMSetGuestEFER(PVM pVM, uint64_t val)
{
    pVM->cpum.s.Guest.msrEFER = val;
}

CPUMDECL(uint32_t) CPUMGetGuestIDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Guest.idtr.cbIdt;
    return pVM->cpum.s.Guest.idtr.pIdt;
}

CPUMDECL(RTSEL) CPUMGetGuestTR(PVM pVM)
{
    return pVM->cpum.s.Guest.tr;
}

CPUMDECL(RTSEL) CPUMGetGuestCS(PVM pVM)
{
    return pVM->cpum.s.Guest.cs;
}

CPUMDECL(RTSEL) CPUMGetGuestDS(PVM pVM)
{
    return pVM->cpum.s.Guest.ds;
}

CPUMDECL(RTSEL) CPUMGetGuestES(PVM pVM)
{
    return pVM->cpum.s.Guest.es;
}

CPUMDECL(RTSEL) CPUMGetGuestFS(PVM pVM)
{
    return pVM->cpum.s.Guest.fs;
}

CPUMDECL(RTSEL) CPUMGetGuestGS(PVM pVM)
{
    return pVM->cpum.s.Guest.gs;
}

CPUMDECL(RTSEL) CPUMGetGuestSS(PVM pVM)
{
    return pVM->cpum.s.Guest.ss;
}

CPUMDECL(RTSEL) CPUMGetGuestLDTR(PVM pVM)
{
    return pVM->cpum.s.Guest.ldtr;
}

CPUMDECL(uint64_t) CPUMGetGuestCR0(PVM pVM)
{
    return pVM->cpum.s.Guest.cr0;
}

CPUMDECL(uint64_t) CPUMGetGuestCR2(PVM pVM)
{
    return pVM->cpum.s.Guest.cr2;
}

CPUMDECL(uint64_t) CPUMGetGuestCR3(PVM pVM)
{
    return pVM->cpum.s.Guest.cr3;
}

CPUMDECL(uint64_t) CPUMGetGuestCR4(PVM pVM)
{
    return pVM->cpum.s.Guest.cr4;
}

CPUMDECL(void) CPUMGetGuestGDTR(PVM pVM, PVBOXGDTR pGDTR)
{
    *pGDTR = pVM->cpum.s.Guest.gdtr;
}

CPUMDECL(uint32_t) CPUMGetGuestEIP(PVM pVM)
{
    return pVM->cpum.s.Guest.eip;
}

CPUMDECL(uint32_t) CPUMGetGuestEAX(PVM pVM)
{
    return pVM->cpum.s.Guest.eax;
}

CPUMDECL(uint32_t) CPUMGetGuestEBX(PVM pVM)
{
    return pVM->cpum.s.Guest.ebx;
}

CPUMDECL(uint32_t) CPUMGetGuestECX(PVM pVM)
{
    return pVM->cpum.s.Guest.ecx;
}

CPUMDECL(uint32_t) CPUMGetGuestEDX(PVM pVM)
{
    return pVM->cpum.s.Guest.edx;
}

CPUMDECL(uint32_t) CPUMGetGuestESI(PVM pVM)
{
    return pVM->cpum.s.Guest.esi;
}

CPUMDECL(uint32_t) CPUMGetGuestEDI(PVM pVM)
{
    return pVM->cpum.s.Guest.edi;
}

CPUMDECL(uint32_t) CPUMGetGuestESP(PVM pVM)
{
    return pVM->cpum.s.Guest.esp;
}

CPUMDECL(uint32_t) CPUMGetGuestEBP(PVM pVM)
{
    return pVM->cpum.s.Guest.ebp;
}

CPUMDECL(uint32_t) CPUMGetGuestEFlags(PVM pVM)
{
    return pVM->cpum.s.Guest.eflags.u32;
}

CPUMDECL(CPUMSELREGHID *) CPUMGetGuestTRHid(PVM pVM)
{
    return &pVM->cpum.s.Guest.trHid;
}

/** @todo crx should be an array */
CPUMDECL(int) CPUMGetGuestCRx(PVM pVM, unsigned iReg, uint64_t *pValue)
{
    switch (iReg)
    {
        case USE_REG_CR0:
            *pValue = pVM->cpum.s.Guest.cr0;
            break;
        case USE_REG_CR2:
            *pValue = pVM->cpum.s.Guest.cr2;
            break;
        case USE_REG_CR3:
            *pValue = pVM->cpum.s.Guest.cr3;
            break;
        case USE_REG_CR4:
            *pValue = pVM->cpum.s.Guest.cr4;
            break;
        default:
            return VERR_INVALID_PARAMETER;
    }
    return VINF_SUCCESS;
}
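
/*
 * Illustrative sketch (not part of the original file): how an instruction
 * emulator might use CPUMGetGuestCRx() with a DISUSE register index instead
 * of the individual CR accessors. The helper name is hypothetical.
 */
#if 0
static int exampleReadControlReg(PVM pVM, unsigned iCrReg, uint64_t *puValue)
{
    /* iCrReg is one of USE_REG_CR0..USE_REG_CR4 from the disassembler. */
    int rc = CPUMGetGuestCRx(pVM, iCrReg, puValue);
    AssertRC(rc);
    return rc;
}
#endif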

CPUMDECL(uint64_t) CPUMGetGuestDR0(PVM pVM)
{
    return pVM->cpum.s.Guest.dr0;
}

CPUMDECL(uint64_t) CPUMGetGuestDR1(PVM pVM)
{
    return pVM->cpum.s.Guest.dr1;
}

CPUMDECL(uint64_t) CPUMGetGuestDR2(PVM pVM)
{
    return pVM->cpum.s.Guest.dr2;
}

CPUMDECL(uint64_t) CPUMGetGuestDR3(PVM pVM)
{
    return pVM->cpum.s.Guest.dr3;
}

CPUMDECL(uint64_t) CPUMGetGuestDR6(PVM pVM)
{
    return pVM->cpum.s.Guest.dr6;
}

CPUMDECL(uint64_t) CPUMGetGuestDR7(PVM pVM)
{
    return pVM->cpum.s.Guest.dr7;
}

/** @todo drx should be an array */
CPUMDECL(int) CPUMGetGuestDRx(PVM pVM, uint32_t iReg, uint64_t *pValue)
{
    switch (iReg)
    {
        case USE_REG_DR0:
            *pValue = pVM->cpum.s.Guest.dr0;
            break;
        case USE_REG_DR1:
            *pValue = pVM->cpum.s.Guest.dr1;
            break;
        case USE_REG_DR2:
            *pValue = pVM->cpum.s.Guest.dr2;
            break;
        case USE_REG_DR3:
            *pValue = pVM->cpum.s.Guest.dr3;
            break;
        case USE_REG_DR4:
        case USE_REG_DR6:
            *pValue = pVM->cpum.s.Guest.dr6;
            break;
        case USE_REG_DR5:
        case USE_REG_DR7:
            *pValue = pVM->cpum.s.Guest.dr7;
            break;

        default:
            return VERR_INVALID_PARAMETER;
    }
    return VINF_SUCCESS;
}

CPUMDECL(uint64_t) CPUMGetGuestEFER(PVM pVM)
{
    return pVM->cpum.s.Guest.msrEFER;
}

/**
 * Gets a CpuId leaf.
 *
 * @param   pVM     The VM handle.
 * @param   iLeaf   The CPUID leaf to get.
 * @param   pEax    Where to store the EAX value.
 * @param   pEbx    Where to store the EBX value.
 * @param   pEcx    Where to store the ECX value.
 * @param   pEdx    Where to store the EDX value.
 */
CPUMDECL(void) CPUMGetGuestCpuId(PVM pVM, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
{
    PCCPUMCPUID pCpuId;
    if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
        pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
    else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
        pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
    else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
        pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
    else
        pCpuId = &pVM->cpum.s.GuestCpuIdDef;

    *pEax = pCpuId->eax;
    *pEbx = pCpuId->ebx;
    *pEcx = pCpuId->ecx;
    *pEdx = pCpuId->edx;
    Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
}
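
/*
 * Illustrative sketch (not part of the original file): querying the guest
 * CPUID tables. Out-of-range leaves fall back to GuestCpuIdDef as
 * implemented above. The helper name is hypothetical.
 */
#if 0
static bool exampleGuestHasTsc(PVM pVM)
{
    uint32_t uEax, uEbx, uEcx, uEdx;
    CPUMGetGuestCpuId(pVM, 1, &uEax, &uEbx, &uEcx, &uEdx);
    return (uEdx & X86_CPUID_FEATURE_EDX_TSC) != 0;
}
#endif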

/**
 * Gets a pointer to the array of standard CPUID leafs.
 *
 * CPUMGetGuestCpuIdStdMax() gives the size of the array.
 *
 * @returns Pointer to the standard CPUID leafs (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdStdGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
}

/**
 * Gets a pointer to the array of extended CPUID leafs.
 *
 * CPUMGetGuestCpuIdExtMax() gives the size of the array.
 *
 * @returns Pointer to the extended CPUID leafs (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdExtGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
}

/**
 * Gets a pointer to the array of centaur CPUID leafs.
 *
 * CPUMGetGuestCpuIdCentaurMax() gives the size of the array.
 *
 * @returns Pointer to the centaur CPUID leafs (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdCentaurGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
}

/**
 * Gets a pointer to the default CPUID leaf.
 *
 * @returns Pointer to the default CPUID leaf (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdDefGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
}

/**
 * Gets the number of standard CPUID leafs.
 *
 * @returns Number of leafs.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
}

/**
 * Gets the number of extended CPUID leafs.
 *
 * @returns Number of leafs.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
}

/**
 * Gets the number of centaur CPUID leafs.
 *
 * @returns Number of leafs.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
}

/**
 * Sets a CPUID feature bit.
 *
 * @param   pVM             The VM Handle.
 * @param   enmFeature      The feature to set.
 */
CPUMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        /*
         * Set the APIC bit in both feature masks.
         */
        case CPUMCPUIDFEATURE_APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
            break;

        /*
         * Set the sysenter/sysexit bit in the standard feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_SEP:
        {
            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
            {
                AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
                return;
            }

            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
            break;
        }

        /*
         * Set the syscall/sysret bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_SYSCALL:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
            {
                LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
                return;
            }
            /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
            break;
        }

        /*
         * Set the PAE bit in both feature masks.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_PAE:
        {
            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
            {
                LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
                return;
            }

            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
            break;
        }

        /*
         * Set the LONG MODE bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_LONG_MODE:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
            {
                LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
            break;
        }

        /*
         * Set the NXE bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_NXE:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
            {
                LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
            break;
        }

        case CPUMCPUIDFEATURE_LAHF:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
            {
                LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
                return;
            }

            pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
}
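
/*
 * Illustrative sketch (not part of the original file): VM init code could
 * use the feature setters and getters to shape the guest CPUID from
 * configuration. The helper name and the fPAEEnabled flag are hypothetical.
 */
#if 0
static void exampleApplyCpuIdConfig(PVM pVM, bool fPAEEnabled)
{
    if (fPAEEnabled)
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    else
        CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    /* Note: the set may silently refuse if the host lacks the feature. */
    Log(("PAE now %d\n", CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)));
}
#endif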

/**
 * Queries a CPUID feature bit.
 *
 * @returns boolean for feature presence.
 * @param   pVM             The VM Handle.
 * @param   enmFeature      The feature to query.
 */
CPUMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        case CPUMCPUIDFEATURE_PAE:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    return false;
}

/**
 * Clears a CPUID feature bit.
 *
 * @param   pVM             The VM Handle.
 * @param   enmFeature      The feature to clear.
 */
CPUMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        /*
         * Clear the APIC bit in both feature masks.
         */
        case CPUMCPUIDFEATURE_APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
            Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n"));
            break;

        case CPUMCPUIDFEATURE_PAE:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
            LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
}

/**
 * Gets the CPU vendor.
 *
 * @returns CPU vendor.
 * @param   pVM     The VM handle.
 */
CPUMDECL(CPUMCPUVENDOR) CPUMGetCPUVendor(PVM pVM)
{
    return pVM->cpum.s.enmCPUVendor;
}


CPUMDECL(int) CPUMSetGuestDR0(PVM pVM, uint64_t uDr0)
{
    pVM->cpum.s.Guest.dr0 = uDr0;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR1(PVM pVM, uint64_t uDr1)
{
    pVM->cpum.s.Guest.dr1 = uDr1;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR2(PVM pVM, uint64_t uDr2)
{
    pVM->cpum.s.Guest.dr2 = uDr2;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR3(PVM pVM, uint64_t uDr3)
{
    pVM->cpum.s.Guest.dr3 = uDr3;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR6(PVM pVM, uint64_t uDr6)
{
    pVM->cpum.s.Guest.dr6 = uDr6;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR7(PVM pVM, uint64_t uDr7)
{
    pVM->cpum.s.Guest.dr7 = uDr7;
    return CPUMRecalcHyperDRx(pVM);
}

/** @todo drx should be an array */
CPUMDECL(int) CPUMSetGuestDRx(PVM pVM, uint32_t iReg, uint64_t Value)
{
    switch (iReg)
    {
        case USE_REG_DR0:
            pVM->cpum.s.Guest.dr0 = Value;
            break;
        case USE_REG_DR1:
            pVM->cpum.s.Guest.dr1 = Value;
            break;
        case USE_REG_DR2:
            pVM->cpum.s.Guest.dr2 = Value;
            break;
        case USE_REG_DR3:
            pVM->cpum.s.Guest.dr3 = Value;
            break;
        case USE_REG_DR4:
        case USE_REG_DR6:
            pVM->cpum.s.Guest.dr6 = Value;
            break;
        case USE_REG_DR5:
        case USE_REG_DR7:
            pVM->cpum.s.Guest.dr7 = Value;
            break;

        default:
            return VERR_INVALID_PARAMETER;
    }
    return CPUMRecalcHyperDRx(pVM);
}


/**
 * Recalculates the hypervisor DRx register values based on
 * current guest registers and DBGF breakpoints.
 *
 * This is called whenever a guest DRx register is modified and when DBGF
 * sets a hardware breakpoint. In guest context this function will reload
 * any (hyper) DRx registers which come out with different values.
 *
 * @returns VINF_SUCCESS.
 * @param   pVM     The VM handle.
 */
CPUMDECL(int) CPUMRecalcHyperDRx(PVM pVM)
{
    /*
     * Compare the DR7s first.
     *
     * We only care about the enabled flags. The GE and LE flags are always
     * set and we don't care if the guest doesn't set them. GD is virtualized
     * when we dispatch #DB; we never enable it.
     */
    const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
#ifdef CPUM_VIRTUALIZE_DRX
    const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVM);
#else
    const RTGCUINTREG uGstDr7 = 0;
#endif
    if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
    {
        /*
         * Ok, something is enabled. Recalc each of the breakpoints.
         * Straightforward code, not optimized/minimized in any way.
         */
        RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;

        /* bp 0 */
        RTGCUINTREG uNewDr0;
        if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
            uNewDr0 = DBGFBpGetDR0(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
            uNewDr0 = CPUMGetGuestDR0(pVM);
        }
        else
            uNewDr0 = pVM->cpum.s.Hyper.dr0;

        /* bp 1 */
        RTGCUINTREG uNewDr1;
        if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
            uNewDr1 = DBGFBpGetDR1(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
            uNewDr1 = CPUMGetGuestDR1(pVM);
        }
        else
            uNewDr1 = pVM->cpum.s.Hyper.dr1;

        /* bp 2 */
        RTGCUINTREG uNewDr2;
        if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
            uNewDr2 = DBGFBpGetDR2(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
            uNewDr2 = CPUMGetGuestDR2(pVM);
        }
        else
            uNewDr2 = pVM->cpum.s.Hyper.dr2;

        /* bp 3 */
        RTGCUINTREG uNewDr3;
        if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
            uNewDr3 = DBGFBpGetDR3(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
            uNewDr3 = CPUMGetGuestDR3(pVM);
        }
        else
            uNewDr3 = pVM->cpum.s.Hyper.dr3;

        /*
         * Apply the updates.
         */
#ifdef IN_GC
        if (!(pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
        {
            /** @todo save host DRx registers. */
        }
#endif
        pVM->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
        if (uNewDr3 != pVM->cpum.s.Hyper.dr3)
            CPUMSetHyperDR3(pVM, uNewDr3);
        if (uNewDr2 != pVM->cpum.s.Hyper.dr2)
            CPUMSetHyperDR2(pVM, uNewDr2);
        if (uNewDr1 != pVM->cpum.s.Hyper.dr1)
            CPUMSetHyperDR1(pVM, uNewDr1);
        if (uNewDr0 != pVM->cpum.s.Hyper.dr0)
            CPUMSetHyperDR0(pVM, uNewDr0);
        if (uNewDr7 != pVM->cpum.s.Hyper.dr7)
            CPUMSetHyperDR7(pVM, uNewDr7);
    }
    else
    {
#ifdef IN_GC
        if (pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
        {
            /** @todo restore host DRx registers. */
        }
#endif
        pVM->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
    }
    Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
          pVM->cpum.s.fUseFlags, pVM->cpum.s.Hyper.dr0, pVM->cpum.s.Hyper.dr1,
          pVM->cpum.s.Hyper.dr2, pVM->cpum.s.Hyper.dr3, pVM->cpum.s.Hyper.dr6,
          pVM->cpum.s.Hyper.dr7));

    return VINF_SUCCESS;
}
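
/*
 * Illustrative sketch (not part of the original file): the merge rule above
 * for one breakpoint slot, written out on its own. A DBGF hardware
 * breakpoint wins the slot; otherwise a guest-enabled breakpoint is used;
 * otherwise the current hyper value is kept. The helper is hypothetical.
 */
#if 0
static RTGCUINTREG exampleMergeBp0(PVM pVM, RTGCUINTREG uDbgfDr7, RTGCUINTREG uGstDr7, RTGCUINTREG *puNewDr7)
{
    if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
    {
        *puNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
        return DBGFBpGetDR0(pVM);
    }
    if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
    {
        *puNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
        return CPUMGetGuestDR0(pVM);
    }
    return pVM->cpum.s.Hyper.dr0; /* slot unused; keep the current value */
}
#endif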

#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */

/**
 * Transforms the guest CPU state to raw-ring mode.
 *
 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
 *
 * @returns VBox status code. (recompiler failure)
 * @param   pVM         VM handle.
 * @param   pCtxCore    The context core (for trap usage).
 * @see     @ref pg_raw
 */
CPUMDECL(int) CPUMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    Assert(!pVM->cpum.s.fRawEntered);
    if (!pCtxCore)
        pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Guest);

    /*
     * Are we in Ring-0?
     */
    if (    pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
        &&  !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Enter execution mode.
         */
        PATMRawEnter(pVM, pCtxCore);

        /*
         * Set CPL to Ring-1.
         */
        pCtxCore->ss |= 1;
        if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
            pCtxCore->cs |= 1;
    }
    else
    {
        AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
                  ("ring-1 code not supported\n"));
        /*
         * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
         */
        PATMRawEnter(pVM, pCtxCore);
    }

    /*
     * Assert sanity.
     */
    AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
    AssertReleaseMsg(   pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
                     || pCtxCore->eflags.Bits.u1VM,
                     ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
    Assert((pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
    pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */

    pVM->cpum.s.fRawEntered = true;
    return VINF_SUCCESS;
}


/**
 * Transforms the guest CPU state from raw-ring mode to correct values.
 *
 * This function will change any selector registers with DPL=1 to DPL=0.
 *
 * @returns Adjusted rc.
 * @param   pVM         VM handle.
 * @param   rc          Raw mode return code.
 * @param   pCtxCore    The context core (for trap usage).
 * @see     @ref pg_raw
 */
CPUMDECL(int) CPUMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rc)
{
    /*
     * Don't leave if we've already left (in GC).
     */
    Assert(pVM->cpum.s.fRawEntered);
    if (!pVM->cpum.s.fRawEntered)
        return rc;
    pVM->cpum.s.fRawEntered = false;

    PCPUMCTX pCtx = &pVM->cpum.s.Guest;
    if (!pCtxCore)
        pCtxCore = CPUMCTX2CORE(pCtx);
    Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
    AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
              ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));

    /*
     * Are we executing in raw ring-1?
     */
    if (    (pCtxCore->ss & X86_SEL_RPL) == 1
        &&  !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Leave execution mode.
         */
        PATMRawLeave(pVM, pCtxCore, rc);
        /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
        /** @todo See what happens if we remove this. */
        if ((pCtxCore->ds & X86_SEL_RPL) == 1)
            pCtxCore->ds &= ~X86_SEL_RPL;
        if ((pCtxCore->es & X86_SEL_RPL) == 1)
            pCtxCore->es &= ~X86_SEL_RPL;
        if ((pCtxCore->fs & X86_SEL_RPL) == 1)
            pCtxCore->fs &= ~X86_SEL_RPL;
        if ((pCtxCore->gs & X86_SEL_RPL) == 1)
            pCtxCore->gs &= ~X86_SEL_RPL;

        /*
         * Ring-1 selector => Ring-0.
         */
        pCtxCore->ss &= ~X86_SEL_RPL;
        if ((pCtxCore->cs & X86_SEL_RPL) == 1)
            pCtxCore->cs &= ~X86_SEL_RPL;
    }
    else
    {
        /*
         * PATM is taking care of the IOPL and IF flags for us.
         */
        PATMRawLeave(pVM, pCtxCore, rc);
        if (!pCtxCore->eflags.Bits.u1VM)
        {
            /** @todo See what happens if we remove this. */
            if ((pCtxCore->ds & X86_SEL_RPL) == 1)
                pCtxCore->ds &= ~X86_SEL_RPL;
            if ((pCtxCore->es & X86_SEL_RPL) == 1)
                pCtxCore->es &= ~X86_SEL_RPL;
            if ((pCtxCore->fs & X86_SEL_RPL) == 1)
                pCtxCore->fs &= ~X86_SEL_RPL;
            if ((pCtxCore->gs & X86_SEL_RPL) == 1)
                pCtxCore->gs &= ~X86_SEL_RPL;
        }
    }

    return rc;
}
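
/*
 * Illustrative sketch (not part of the original file): CPUMRawEnter() and
 * CPUMRawLeave() bracket raw-mode execution. The helper below is
 * hypothetical and heavily simplified; the real loop lives in EM, and the
 * VMMR3RawRunGC() call is shown only as a stand-in for "run guest code".
 */
#if 0
static int exampleRunRaw(PVM pVM)
{
    int rc = CPUMRawEnter(pVM, NULL);   /* ring compression: guest ring-0 -> raw ring-1 */
    AssertRC(rc);
    rc = VMMR3RawRunGC(pVM);            /* execute guest code */
    rc = CPUMRawLeave(pVM, NULL, rc);   /* undo the RPL fiddling, pass rc through */
    return rc;
}
#endif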

/**
 * Updates the EFLAGS while we're in raw-mode.
 *
 * @param   pVM         The VM handle.
 * @param   pCtxCore    The context core.
 * @param   eflags      The new EFLAGS value.
 */
CPUMDECL(void) CPUMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t eflags)
{
    if (!pVM->cpum.s.fRawEntered)
    {
        pCtxCore->eflags.u32 = eflags;
        return;
    }
    PATMRawSetEFlags(pVM, pCtxCore, eflags);
}

#endif /* !IN_RING0 */

/**
 * Gets the EFLAGS while we're in raw-mode.
 *
 * @returns The eflags.
 * @param   pVM         The VM handle.
 * @param   pCtxCore    The context core.
 */
CPUMDECL(uint32_t) CPUMRawGetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore)
{
#ifdef IN_RING0
    return pCtxCore->eflags.u32;
#else
    if (!pVM->cpum.s.fRawEntered)
        return pCtxCore->eflags.u32;
    return PATMRawGetEFlags(pVM, pCtxCore);
#endif
}




/**
 * Gets and resets the changed flags (CPUM_CHANGED_*).
 * Only REM should call this function.
 *
 * @returns The changed flags.
 * @param   pVM     The VM handle.
 */
CPUMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVM pVM)
{
    unsigned fFlags = pVM->cpum.s.fChanged;
    pVM->cpum.s.fChanged = 0;
    /** @todo change the switcher to use the fChanged flags. */
    if (pVM->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
    {
        fFlags |= CPUM_CHANGED_FPU_REM;
        pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
    }
    return fFlags;
}
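
/*
 * Illustrative sketch (not part of the original file): how the recompiler
 * side could consume the changed flags to decide what to resync. The helper
 * and the two stubbed-out reactions are hypothetical.
 */
#if 0
static void exampleRemSyncState(PVM pVM)
{
    unsigned fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
    if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
    {
        /* flush the recompiler's TLB here */
    }
    if (fFlags & CPUM_CHANGED_FPU_REM)
    {
        /* resync the FPU/XMM state here */
    }
}
#endif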

/**
 * Sets the specified changed flags (CPUM_CHANGED_*).
 *
 * @param   pVM             The VM handle.
 * @param   fChangedFlags   The changed flags to be added.
 */
CPUMDECL(void) CPUMSetChangedFlags(PVM pVM, uint32_t fChangedFlags)
{
    pVM->cpum.s.fChanged |= fChangedFlags;
}

/**
 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
 * @returns true if supported.
 * @returns false if not supported.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMSupportsFXSR(PVM pVM)
{
    return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
}


/**
 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
 * @returns true if used.
 * @returns false if not used.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
{
    return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSENTER) != 0;
}


/**
 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
 * @returns true if used.
 * @returns false if not used.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
{
    return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSCALL) != 0;
}


#ifndef IN_RING3
/**
 * Lazily sync in the FPU/XMM state.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 */
CPUMDECL(int) CPUMHandleLazyFPU(PVM pVM)
{
    return CPUMHandleLazyFPUAsm(&pVM->cpum.s);
}


/**
 * Restore host FPU/XMM state.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 */
CPUMDECL(int) CPUMRestoreHostFPUState(PVM pVM)
{
    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
    return CPUMRestoreHostFPUStateAsm(&pVM->cpum.s);
}
#endif /* !IN_RING3 */


/**
 * Checks if we activated the FPU/XMM state of the guest OS.
 * @returns true if we did.
 * @returns false if not.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMIsGuestFPUStateActive(PVM pVM)
{
    return (pVM->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
}


/**
 * Deactivate the FPU/XMM state of the guest OS.
 * @param   pVM     The VM handle.
 */
CPUMDECL(void) CPUMDeactivateGuestFPUState(PVM pVM)
{
    pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
}


/**
 * Checks if the hidden selector registers are valid.
 * @returns true if they are.
 * @returns false if not.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
{
    return !!pVM->cpum.s.fValidHiddenSelRegs; /** @todo change fValidHiddenSelRegs to bool! */
}


/**
 * Sets whether the hidden selector registers are valid.
 * @param   pVM     The VM handle.
 * @param   fValid  Valid or not.
 */
CPUMDECL(void) CPUMSetHiddenSelRegsValid(PVM pVM, bool fValid)
{
    pVM->cpum.s.fValidHiddenSelRegs = fValid;
}


/**
 * Get the current privilege level of the guest.
 *
 * @returns CPL (0-3).
 * @param   pVM         VM handle.
 * @param   pCtxCore    The context core (trap register frame).
 */
CPUMDECL(uint32_t) CPUMGetGuestCPL(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    uint32_t cpl;

    if (CPUMAreHiddenSelRegsValid(pVM))
        cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
    else if (RT_LIKELY(pVM->cpum.s.Guest.cr0 & X86_CR0_PE))
    {
        if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
        {
            cpl = (pCtxCore->ss & X86_SEL_RPL);
#ifndef IN_RING0
            if (cpl == 1)
                cpl = 0;
#endif
        }
        else
            cpl = 3;
    }
    else
        cpl = 0; /* real mode; cpl is zero */

    return cpl;
}
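
/*
 * Illustrative sketch (not part of the original file): why the cpl==1 -> 0
 * compression above exists. Outside ring-0 context, SS.RPL == 1 normally
 * means guest ring-0 code is being run raw in ring-1, so callers should see
 * CPL 0. The helper name is hypothetical.
 */
#if 0
static bool exampleIsGuestInKernelMode(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    /* Works both with valid hidden registers and with raw-mode ring compression. */
    return CPUMGetGuestCPL(pVM, pCtxCore) == 0;
}
#endif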


/**
 * Gets the current guest CPU mode.
 *
 * If paging mode is what you need, check out PGMGetGuestMode().
 *
 * @returns The CPU mode.
 * @param   pVM     The VM handle.
 */
CPUMDECL(CPUMMODE) CPUMGetGuestMode(PVM pVM)
{
    CPUMMODE enmMode;
    if (!(pVM->cpum.s.Guest.cr0 & X86_CR0_PE))
        enmMode = CPUMMODE_REAL;
    else if (!(pVM->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
        enmMode = CPUMMODE_PROTECTED;
    else
        enmMode = CPUMMODE_LONG;

    return enmMode;
}
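
/*
 * Illustrative sketch (not part of the original file): dispatching on the
 * guest CPU mode, e.g. in an instruction emulator. The helper is
 * hypothetical, and the protected-mode case deliberately ignores the code
 * segment D bit for brevity.
 */
#if 0
static uint32_t exampleDefaultOperandSize(PVM pVM)
{
    switch (CPUMGetGuestMode(pVM))
    {
        case CPUMMODE_REAL:      return 16;
        case CPUMMODE_PROTECTED: return 32; /* would really depend on CS D bit */
        case CPUMMODE_LONG:      return 64;
        default:                 return 32;
    }
}
#endif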