VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 9354

Last change on this file since 9354 was 9354, checked in by vboxsync, 17 years ago

Added CPUMGetCPUVendor.
Added CPUMCPUIDFEATURE_NXE, CPUMCPUIDFEATURE_LONG_MODE, CPUMCPUIDFEATURE_LAHF & CPUMCPUIDFEATURE_SYSCALL cpuid feature bits.

Enable the required cpuid feature bits in 64 bits mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 46.9 KB
Line 
1/* $Id: CPUMAllRegs.cpp 9354 2008-06-03 13:45:14Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Gets and Sets.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_CPUM
27#include <VBox/cpum.h>
28#include <VBox/patm.h>
29#include <VBox/dbgf.h>
30#include <VBox/mm.h>
31#include "CPUMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/err.h>
34#include <VBox/dis.h>
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38
39
40
41/** Disable stack frame pointer generation here. */
42#if defined(_MSC_VER) && !defined(DEBUG)
43# pragma optimize("y", off)
44#endif
45
46
47/**
48 * Sets or resets an alternative hypervisor context core.
49 *
50 * This is called when we get a hypervisor trap set switch the context
51 * core with the trap frame on the stack. It is called again to reset
52 * back to the default context core when resuming hypervisor execution.
53 *
54 * @param pVM The VM handle.
55 * @param pCtxCore Pointer to the alternative context core or NULL
56 * to go back to the default context core.
57 */
58CPUMDECL(void) CPUMHyperSetCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
59{
60 LogFlow(("CPUMHyperSetCtxCore: %p/%p/%p -> %p\n", pVM->cpum.s.CTXALLSUFF(pHyperCore), pCtxCore));
61 if (!pCtxCore)
62 {
63 pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
64 pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
65 pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
66 pVM->cpum.s.pHyperCoreGC = (RCPTRTYPE(PCPUMCTXCORE))VM_GUEST_ADDR(pVM, pCtxCore);
67 }
68 else
69 {
70 pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
71 pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
72 pVM->cpum.s.pHyperCoreGC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToGC(pVM, pCtxCore);
73 }
74}
75
76
/**
 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
 * This is only for reading in order to save a few calls.
 *
 * @returns Read-only pointer to the currently active hypervisor context core.
 * @param pVM Handle to the virtual machine.
 */
CPUMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore);
}


/**
 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
 *
 * @returns VBox status code.
 * @param pVM Handle to the virtual machine.
 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
 *
 * @deprecated This will *not* (and has never) given the right picture of the
 * hypervisor register state. With CPUMHyperSetCtxCore() this is
 * getting much worse. So, use the individual functions for getting
 * and esp. setting the hypervisor registers.
 */
CPUMDECL(int) CPUMQueryHyperCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
{
    /* Always succeeds; the pointer lives for the lifetime of the VM. */
    *ppCtx = &pVM->cpum.s.Hyper;
    return VINF_SUCCESS;
}
106
/** Sets the hypervisor GDTR (base + limit); the padding fields are zeroed. */
CPUMDECL(void) CPUMSetHyperGDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Hyper.gdtr.cbGdt = limit;
    pVM->cpum.s.Hyper.gdtr.pGdt = addr;
    pVM->cpum.s.Hyper.gdtrPadding = 0;
    pVM->cpum.s.Hyper.gdtrPadding64 = 0;
}

/** Sets the hypervisor IDTR (base + limit); the padding fields are zeroed. */
CPUMDECL(void) CPUMSetHyperIDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Hyper.idtr.cbIdt = limit;
    pVM->cpum.s.Hyper.idtr.pIdt = addr;
    pVM->cpum.s.Hyper.idtrPadding = 0;
    pVM->cpum.s.Hyper.idtrPadding64 = 0;
}

/** Sets the hypervisor CR3 value. */
CPUMDECL(void) CPUMSetHyperCR3(PVM pVM, uint32_t cr3)
{
    pVM->cpum.s.Hyper.cr3 = cr3;
}

/*
 * The selector / GPR mutators below write through the *currently active*
 * hypervisor context core pointer (may be an alternative core installed
 * by CPUMHyperSetCtxCore), not directly into cpum.s.Hyper.
 */

/** Sets the hypervisor CS selector in the active context core. */
CPUMDECL(void) CPUMSetHyperCS(PVM pVM, RTSEL SelCS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->cs = SelCS;
}

/** Sets the hypervisor DS selector in the active context core. */
CPUMDECL(void) CPUMSetHyperDS(PVM pVM, RTSEL SelDS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->ds = SelDS;
}

/** Sets the hypervisor ES selector in the active context core. */
CPUMDECL(void) CPUMSetHyperES(PVM pVM, RTSEL SelES)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->es = SelES;
}

/** Sets the hypervisor FS selector in the active context core. */
CPUMDECL(void) CPUMSetHyperFS(PVM pVM, RTSEL SelFS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->fs = SelFS;
}

/** Sets the hypervisor GS selector in the active context core. */
CPUMDECL(void) CPUMSetHyperGS(PVM pVM, RTSEL SelGS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->gs = SelGS;
}

/** Sets the hypervisor SS selector in the active context core. */
CPUMDECL(void) CPUMSetHyperSS(PVM pVM, RTSEL SelSS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->ss = SelSS;
}

/** Sets the hypervisor ESP in the active context core. */
CPUMDECL(void) CPUMSetHyperESP(PVM pVM, uint32_t u32ESP)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->esp = u32ESP;
}

/** Sets the hypervisor EFLAGS in the active context core.
 * @returns VINF_SUCCESS (consider it void). */
CPUMDECL(int) CPUMSetHyperEFlags(PVM pVM, uint32_t Efl)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->eflags.u32 = Efl;
    return VINF_SUCCESS;
}

/** Sets the hypervisor EIP in the active context core. */
CPUMDECL(void) CPUMSetHyperEIP(PVM pVM, uint32_t u32EIP)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->eip = u32EIP;
}

/* TR/LDTR/DRx are stored in the Hyper context itself, not the active core. */

/** Sets the hypervisor task register selector. */
CPUMDECL(void) CPUMSetHyperTR(PVM pVM, RTSEL SelTR)
{
    pVM->cpum.s.Hyper.tr = SelTR;
}

/** Sets the hypervisor LDTR selector. */
CPUMDECL(void) CPUMSetHyperLDTR(PVM pVM, RTSEL SelLDTR)
{
    pVM->cpum.s.Hyper.ldtr = SelLDTR;
}

/** Sets the hypervisor DR0 shadow value (not loaded into the CPU here). */
CPUMDECL(void) CPUMSetHyperDR0(PVM pVM, RTGCUINTREG uDr0)
{
    pVM->cpum.s.Hyper.dr0 = uDr0;
    /** @todo in GC we must load it! */
}

/** Sets the hypervisor DR1 shadow value (not loaded into the CPU here). */
CPUMDECL(void) CPUMSetHyperDR1(PVM pVM, RTGCUINTREG uDr1)
{
    pVM->cpum.s.Hyper.dr1 = uDr1;
    /** @todo in GC we must load it! */
}

/** Sets the hypervisor DR2 shadow value (not loaded into the CPU here). */
CPUMDECL(void) CPUMSetHyperDR2(PVM pVM, RTGCUINTREG uDr2)
{
    pVM->cpum.s.Hyper.dr2 = uDr2;
    /** @todo in GC we must load it! */
}

/** Sets the hypervisor DR3 shadow value (not loaded into the CPU here). */
CPUMDECL(void) CPUMSetHyperDR3(PVM pVM, RTGCUINTREG uDr3)
{
    pVM->cpum.s.Hyper.dr3 = uDr3;
    /** @todo in GC we must load it! */
}

/** Sets the hypervisor DR6 shadow value (not loaded into the CPU here). */
CPUMDECL(void) CPUMSetHyperDR6(PVM pVM, RTGCUINTREG uDr6)
{
    pVM->cpum.s.Hyper.dr6 = uDr6;
    /** @todo in GC we must load it! */
}

/** Sets the hypervisor DR7 shadow value (not loaded into the CPU here). */
CPUMDECL(void) CPUMSetHyperDR7(PVM pVM, RTGCUINTREG uDr7)
{
    pVM->cpum.s.Hyper.dr7 = uDr7;
    /** @todo in GC we must load it! */
}


/* Read accessors for the selectors in the active hypervisor context core. */

/** Gets the hypervisor CS selector. */
CPUMDECL(RTSEL) CPUMGetHyperCS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->cs;
}

/** Gets the hypervisor DS selector. */
CPUMDECL(RTSEL) CPUMGetHyperDS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ds;
}

/** Gets the hypervisor ES selector. */
CPUMDECL(RTSEL) CPUMGetHyperES(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->es;
}

/** Gets the hypervisor FS selector. */
CPUMDECL(RTSEL) CPUMGetHyperFS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->fs;
}

/** Gets the hypervisor GS selector. */
CPUMDECL(RTSEL) CPUMGetHyperGS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->gs;
}

/** Gets the hypervisor SS selector. */
CPUMDECL(RTSEL) CPUMGetHyperSS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ss;
}
250
#if 0 /* these are not correct. */

/* Disabled: these read cpum.s.Hyper directly, which does not reflect the
   actual hypervisor control registers. Kept for reference only. */

CPUMDECL(uint32_t) CPUMGetHyperCR0(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr0;
}

CPUMDECL(uint32_t) CPUMGetHyperCR2(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr2;
}

CPUMDECL(uint32_t) CPUMGetHyperCR3(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr3;
}

CPUMDECL(uint32_t) CPUMGetHyperCR4(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr4;
}

#endif /* not correct */

/* Read accessors for GPRs / flags / EIP in the active hypervisor context core. */

/** Gets the hypervisor EAX. */
CPUMDECL(uint32_t) CPUMGetHyperEAX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eax;
}

/** Gets the hypervisor EBX. */
CPUMDECL(uint32_t) CPUMGetHyperEBX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ebx;
}

/** Gets the hypervisor ECX. */
CPUMDECL(uint32_t) CPUMGetHyperECX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ecx;
}

/** Gets the hypervisor EDX. */
CPUMDECL(uint32_t) CPUMGetHyperEDX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->edx;
}

/** Gets the hypervisor ESI. */
CPUMDECL(uint32_t) CPUMGetHyperESI(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->esi;
}

/** Gets the hypervisor EDI. */
CPUMDECL(uint32_t) CPUMGetHyperEDI(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->edi;
}

/** Gets the hypervisor EBP. */
CPUMDECL(uint32_t) CPUMGetHyperEBP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ebp;
}

/** Gets the hypervisor ESP. */
CPUMDECL(uint32_t) CPUMGetHyperESP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->esp;
}

/** Gets the hypervisor EFLAGS. */
CPUMDECL(uint32_t) CPUMGetHyperEFlags(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eflags.u32;
}

/** Gets the hypervisor EIP. */
CPUMDECL(uint32_t) CPUMGetHyperEIP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eip;
}

/** Gets the hypervisor IDTR base; optionally returns the limit via *pcbLimit. */
CPUMDECL(uint32_t) CPUMGetHyperIDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Hyper.idtr.cbIdt;
    return pVM->cpum.s.Hyper.idtr.pIdt;
}

/** Gets the hypervisor GDTR base; optionally returns the limit via *pcbLimit. */
CPUMDECL(uint32_t) CPUMGetHyperGDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Hyper.gdtr.cbGdt;
    return pVM->cpum.s.Hyper.gdtr.pGdt;
}

/** Gets the hypervisor LDTR selector. */
CPUMDECL(RTSEL) CPUMGetHyperLDTR(PVM pVM)
{
    return pVM->cpum.s.Hyper.ldtr;
}

/** Gets the hypervisor DR0 shadow value. */
CPUMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr0;
}

/** Gets the hypervisor DR1 shadow value. */
CPUMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr1;
}

/** Gets the hypervisor DR2 shadow value. */
CPUMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr2;
}

/** Gets the hypervisor DR3 shadow value. */
CPUMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr3;
}

/** Gets the hypervisor DR6 shadow value. */
CPUMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr6;
}

/** Gets the hypervisor DR7 shadow value. */
CPUMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr7;
}
373
374
375/**
376 * Gets the pointer to the internal CPUMCTXCORE structure.
377 * This is only for reading in order to save a few calls.
378 *
379 * @param pVM Handle to the virtual machine.
380 */
381CPUMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVM pVM)
382{
383 return CPUMCTX2CORE(&pVM->cpum.s.Guest);
384}
385
386
387/**
388 * Sets the guest context core registers.
389 *
390 * @param pVM Handle to the virtual machine.
391 * @param pCtxCore The new context core values.
392 */
393CPUMDECL(void) CPUMSetGuestCtxCore(PVM pVM, PCCPUMCTXCORE pCtxCore)
394{
395 /** @todo #1410 requires selectors to be checked. */
396
397 PCPUMCTXCORE pCtxCoreDst CPUMCTX2CORE(&pVM->cpum.s.Guest);
398 *pCtxCoreDst = *pCtxCore;
399}
400
401
402/**
403 * Queries the pointer to the internal CPUMCTX structure
404 *
405 * @returns VBox status code.
406 * @param pVM Handle to the virtual machine.
407 * @param ppCtx Receives the CPUMCTX pointer when successful.
408 */
409CPUMDECL(int) CPUMQueryGuestCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
410{
411 *ppCtx = &pVM->cpum.s.Guest;
412 return VINF_SUCCESS;
413}
414
415
/** Sets the guest GDTR and flags the change for consumers of fChanged.
 * @returns VINF_SUCCESS (consider it void). */
CPUMDECL(int) CPUMSetGuestGDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Guest.gdtr.cbGdt = limit;
    pVM->cpum.s.Guest.gdtr.pGdt = addr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
    return VINF_SUCCESS;
}

/** Sets the guest IDTR and flags the change.
 * @returns VINF_SUCCESS (consider it void). */
CPUMDECL(int) CPUMSetGuestIDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Guest.idtr.cbIdt = limit;
    pVM->cpum.s.Guest.idtr.pIdt = addr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
    return VINF_SUCCESS;
}

/** Sets the guest task register selector and flags the change.
 * @returns VINF_SUCCESS (consider it void). */
CPUMDECL(int) CPUMSetGuestTR(PVM pVM, uint16_t tr)
{
    pVM->cpum.s.Guest.tr = tr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_TR;
    return VINF_SUCCESS;
}

/** Sets the guest LDTR selector and flags the change.
 * @returns VINF_SUCCESS (consider it void). */
CPUMDECL(int) CPUMSetGuestLDTR(PVM pVM, uint16_t ldtr)
{
    pVM->cpum.s.Guest.ldtr = ldtr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
    return VINF_SUCCESS;
}
445
446
/**
 * Set the guest CR0.
 *
 * When called in GC, the hyper CR0 may be updated if that is
 * required. The caller only has to take special action if AM,
 * WP, PG or PE changes.
 *
 * @returns VINF_SUCCESS (consider it void).
 * @param pVM Pointer to the shared VM structure.
 * @param cr0 The new CR0 value.
 */
CPUMDECL(int) CPUMSetGuestCR0(PVM pVM, uint64_t cr0)
{
#ifdef IN_GC
    /*
     * Check if we need to change hypervisor CR0 because
     * of math stuff.
     */
    if (    (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
        != (pVM->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
    {
        if (!(pVM->cpum.s.fUseFlags & CPUM_USED_FPU))
        {
            /*
             * We haven't saved the host FPU state yet, so TS and MT are both set
             * and EM should be reflecting the guest EM (it always does this).
             */
            if ((cr0 & X86_CR0_EM) != (pVM->cpum.s.Guest.cr0 & X86_CR0_EM))
            {
                /* Only EM changed: update it in the live CR0, keeping TS+MP set. */
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVM->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
                HyperCR0 &= ~X86_CR0_EM;
                HyperCR0 |= cr0 & X86_CR0_EM;
                Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
                ASMSetCR0(HyperCR0);
            }
#ifdef VBOX_STRICT
            else
            {
                /* Strict builds: verify the invariant even when nothing changes. */
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVM->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
            }
#endif
        }
        else
        {
            /*
             * Already saved the state, so we're just mirroring
             * the guest flags.
             */
            uint32_t HyperCR0 = ASMGetCR0();
            AssertMsg(    (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
                      == (pVM->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
                      ("%#x %#x\n", HyperCR0, pVM->cpum.s.Guest.cr0));
            HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
            ASMSetCR0(HyperCR0);
        }
    }
#endif

    /*
     * Check for changes causing TLB flushes (for REM).
     * The caller is responsible for calling PGM when appropriate.
     */
    if (    (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
        != (pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
        pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR0;

    /* ET is always forced set in the stored value. */
    pVM->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
    return VINF_SUCCESS;
}
523
/** Sets the guest CR2 (page fault address). No change flag is raised. */
CPUMDECL(int) CPUMSetGuestCR2(PVM pVM, uint64_t cr2)
{
    pVM->cpum.s.Guest.cr2 = cr2;
    return VINF_SUCCESS;
}

/** Sets the guest CR3 and flags the change. */
CPUMDECL(int) CPUMSetGuestCR3(PVM pVM, uint64_t cr3)
{
    pVM->cpum.s.Guest.cr3 = cr3;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR3;
    return VINF_SUCCESS;
}

/** Sets the guest CR4, flagging a global TLB flush if any of the
 * paging-related bits (PGE/PAE/PSE) change. The OSFSXR bit is masked
 * out when FXSR is not supported for this VM. */
CPUMDECL(int) CPUMSetGuestCR4(PVM pVM, uint64_t cr4)
{
    if (    (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
        != (pVM->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
        pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR4;
    if (!CPUMSupportsFXSR(pVM))
        cr4 &= ~X86_CR4_OSFSXR;
    pVM->cpum.s.Guest.cr4 = cr4;
    return VINF_SUCCESS;
}

/* Plain guest register mutators; all return VINF_SUCCESS (consider void). */

/** Sets the guest EFLAGS. */
CPUMDECL(int) CPUMSetGuestEFlags(PVM pVM, uint32_t eflags)
{
    pVM->cpum.s.Guest.eflags.u32 = eflags;
    return VINF_SUCCESS;
}

/** Sets the guest EIP. */
CPUMDECL(int) CPUMSetGuestEIP(PVM pVM, uint32_t eip)
{
    pVM->cpum.s.Guest.eip = eip;
    return VINF_SUCCESS;
}

/** Sets the guest EAX. */
CPUMDECL(int) CPUMSetGuestEAX(PVM pVM, uint32_t eax)
{
    pVM->cpum.s.Guest.eax = eax;
    return VINF_SUCCESS;
}

/** Sets the guest EBX. */
CPUMDECL(int) CPUMSetGuestEBX(PVM pVM, uint32_t ebx)
{
    pVM->cpum.s.Guest.ebx = ebx;
    return VINF_SUCCESS;
}

/** Sets the guest ECX. */
CPUMDECL(int) CPUMSetGuestECX(PVM pVM, uint32_t ecx)
{
    pVM->cpum.s.Guest.ecx = ecx;
    return VINF_SUCCESS;
}

/** Sets the guest EDX. */
CPUMDECL(int) CPUMSetGuestEDX(PVM pVM, uint32_t edx)
{
    pVM->cpum.s.Guest.edx = edx;
    return VINF_SUCCESS;
}

/** Sets the guest ESP. */
CPUMDECL(int) CPUMSetGuestESP(PVM pVM, uint32_t esp)
{
    pVM->cpum.s.Guest.esp = esp;
    return VINF_SUCCESS;
}

/** Sets the guest EBP. */
CPUMDECL(int) CPUMSetGuestEBP(PVM pVM, uint32_t ebp)
{
    pVM->cpum.s.Guest.ebp = ebp;
    return VINF_SUCCESS;
}

/** Sets the guest ESI. */
CPUMDECL(int) CPUMSetGuestESI(PVM pVM, uint32_t esi)
{
    pVM->cpum.s.Guest.esi = esi;
    return VINF_SUCCESS;
}

/** Sets the guest EDI. */
CPUMDECL(int) CPUMSetGuestEDI(PVM pVM, uint32_t edi)
{
    pVM->cpum.s.Guest.edi = edi;
    return VINF_SUCCESS;
}

/** Sets the guest SS selector. */
CPUMDECL(int) CPUMSetGuestSS(PVM pVM, uint16_t ss)
{
    pVM->cpum.s.Guest.ss = ss;
    return VINF_SUCCESS;
}

/** Sets the guest CS selector. */
CPUMDECL(int) CPUMSetGuestCS(PVM pVM, uint16_t cs)
{
    pVM->cpum.s.Guest.cs = cs;
    return VINF_SUCCESS;
}

/** Sets the guest DS selector. */
CPUMDECL(int) CPUMSetGuestDS(PVM pVM, uint16_t ds)
{
    pVM->cpum.s.Guest.ds = ds;
    return VINF_SUCCESS;
}

/** Sets the guest ES selector. */
CPUMDECL(int) CPUMSetGuestES(PVM pVM, uint16_t es)
{
    pVM->cpum.s.Guest.es = es;
    return VINF_SUCCESS;
}

/** Sets the guest FS selector. */
CPUMDECL(int) CPUMSetGuestFS(PVM pVM, uint16_t fs)
{
    pVM->cpum.s.Guest.fs = fs;
    return VINF_SUCCESS;
}

/** Sets the guest GS selector. */
CPUMDECL(int) CPUMSetGuestGS(PVM pVM, uint16_t gs)
{
    pVM->cpum.s.Guest.gs = gs;
    return VINF_SUCCESS;
}

/** Sets the guest EFER MSR value. */
CPUMDECL(void) CPUMSetGuestEFER(PVM pVM, uint64_t val)
{
    pVM->cpum.s.Guest.msrEFER = val;
}
649
/** Gets the guest IDTR base; optionally returns the limit via *pcbLimit. */
CPUMDECL(uint32_t) CPUMGetGuestIDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Guest.idtr.cbIdt;
    return pVM->cpum.s.Guest.idtr.pIdt;
}

/* Plain guest register read accessors. */

/** Gets the guest task register selector. */
CPUMDECL(RTSEL) CPUMGetGuestTR(PVM pVM)
{
    return pVM->cpum.s.Guest.tr;
}

/** Gets the guest CS selector. */
CPUMDECL(RTSEL) CPUMGetGuestCS(PVM pVM)
{
    return pVM->cpum.s.Guest.cs;
}

/** Gets the guest DS selector. */
CPUMDECL(RTSEL) CPUMGetGuestDS(PVM pVM)
{
    return pVM->cpum.s.Guest.ds;
}

/** Gets the guest ES selector. */
CPUMDECL(RTSEL) CPUMGetGuestES(PVM pVM)
{
    return pVM->cpum.s.Guest.es;
}

/** Gets the guest FS selector. */
CPUMDECL(RTSEL) CPUMGetGuestFS(PVM pVM)
{
    return pVM->cpum.s.Guest.fs;
}

/** Gets the guest GS selector. */
CPUMDECL(RTSEL) CPUMGetGuestGS(PVM pVM)
{
    return pVM->cpum.s.Guest.gs;
}

/** Gets the guest SS selector. */
CPUMDECL(RTSEL) CPUMGetGuestSS(PVM pVM)
{
    return pVM->cpum.s.Guest.ss;
}

/** Gets the guest LDTR selector. */
CPUMDECL(RTSEL) CPUMGetGuestLDTR(PVM pVM)
{
    return pVM->cpum.s.Guest.ldtr;
}

/** Gets the guest CR0. */
CPUMDECL(uint64_t) CPUMGetGuestCR0(PVM pVM)
{
    return pVM->cpum.s.Guest.cr0;
}

/** Gets the guest CR2. */
CPUMDECL(uint64_t) CPUMGetGuestCR2(PVM pVM)
{
    return pVM->cpum.s.Guest.cr2;
}

/** Gets the guest CR3. */
CPUMDECL(uint64_t) CPUMGetGuestCR3(PVM pVM)
{
    return pVM->cpum.s.Guest.cr3;
}

/** Gets the guest CR4. */
CPUMDECL(uint64_t) CPUMGetGuestCR4(PVM pVM)
{
    return pVM->cpum.s.Guest.cr4;
}

/** Copies the guest GDTR (base + limit) into *pGDTR. */
CPUMDECL(void) CPUMGetGuestGDTR(PVM pVM, PVBOXGDTR pGDTR)
{
    *pGDTR = pVM->cpum.s.Guest.gdtr;
}

/** Gets the guest EIP. */
CPUMDECL(uint32_t) CPUMGetGuestEIP(PVM pVM)
{
    return pVM->cpum.s.Guest.eip;
}

/** Gets the guest EAX. */
CPUMDECL(uint32_t) CPUMGetGuestEAX(PVM pVM)
{
    return pVM->cpum.s.Guest.eax;
}

/** Gets the guest EBX. */
CPUMDECL(uint32_t) CPUMGetGuestEBX(PVM pVM)
{
    return pVM->cpum.s.Guest.ebx;
}

/** Gets the guest ECX. */
CPUMDECL(uint32_t) CPUMGetGuestECX(PVM pVM)
{
    return pVM->cpum.s.Guest.ecx;
}

/** Gets the guest EDX. */
CPUMDECL(uint32_t) CPUMGetGuestEDX(PVM pVM)
{
    return pVM->cpum.s.Guest.edx;
}

/** Gets the guest ESI. */
CPUMDECL(uint32_t) CPUMGetGuestESI(PVM pVM)
{
    return pVM->cpum.s.Guest.esi;
}

/** Gets the guest EDI. */
CPUMDECL(uint32_t) CPUMGetGuestEDI(PVM pVM)
{
    return pVM->cpum.s.Guest.edi;
}

/** Gets the guest ESP. */
CPUMDECL(uint32_t) CPUMGetGuestESP(PVM pVM)
{
    return pVM->cpum.s.Guest.esp;
}

/** Gets the guest EBP. */
CPUMDECL(uint32_t) CPUMGetGuestEBP(PVM pVM)
{
    return pVM->cpum.s.Guest.ebp;
}

/** Gets the guest EFLAGS. */
CPUMDECL(uint32_t) CPUMGetGuestEFlags(PVM pVM)
{
    return pVM->cpum.s.Guest.eflags.u32;
}

/** Gets a pointer to the guest's hidden TR register state. */
CPUMDECL(CPUMSELREGHID *) CPUMGetGuestTRHid(PVM pVM)
{
    return &pVM->cpum.s.Guest.trHid;
}
776
777//@todo: crx should be an array
778CPUMDECL(int) CPUMGetGuestCRx(PVM pVM, unsigned iReg, uint64_t *pValue)
779{
780 switch (iReg)
781 {
782 case USE_REG_CR0:
783 *pValue = pVM->cpum.s.Guest.cr0;
784 break;
785 case USE_REG_CR2:
786 *pValue = pVM->cpum.s.Guest.cr2;
787 break;
788 case USE_REG_CR3:
789 *pValue = pVM->cpum.s.Guest.cr3;
790 break;
791 case USE_REG_CR4:
792 *pValue = pVM->cpum.s.Guest.cr4;
793 break;
794 default:
795 return VERR_INVALID_PARAMETER;
796 }
797 return VINF_SUCCESS;
798}
799
/** Gets the guest DR0. */
CPUMDECL(RTGCUINTREG) CPUMGetGuestDR0(PVM pVM)
{
    return pVM->cpum.s.Guest.dr0;
}

/** Gets the guest DR1. */
CPUMDECL(RTGCUINTREG) CPUMGetGuestDR1(PVM pVM)
{
    return pVM->cpum.s.Guest.dr1;
}

/** Gets the guest DR2. */
CPUMDECL(RTGCUINTREG) CPUMGetGuestDR2(PVM pVM)
{
    return pVM->cpum.s.Guest.dr2;
}

/** Gets the guest DR3. */
CPUMDECL(RTGCUINTREG) CPUMGetGuestDR3(PVM pVM)
{
    return pVM->cpum.s.Guest.dr3;
}

/** Gets the guest DR6. */
CPUMDECL(RTGCUINTREG) CPUMGetGuestDR6(PVM pVM)
{
    return pVM->cpum.s.Guest.dr6;
}

/** Gets the guest DR7. */
CPUMDECL(RTGCUINTREG) CPUMGetGuestDR7(PVM pVM)
{
    return pVM->cpum.s.Guest.dr7;
}

/** @todo drx should be an array */
/** Gets a guest debug register by index. DR4/DR5 alias DR6/DR7 here.
 * @returns VINF_SUCCESS, or VERR_INVALID_PARAMETER for an unknown index. */
CPUMDECL(int) CPUMGetGuestDRx(PVM pVM, uint32_t iReg, uint32_t *pValue)
{
    switch (iReg)
    {
        case USE_REG_DR0:
            *pValue = pVM->cpum.s.Guest.dr0;
            break;
        case USE_REG_DR1:
            *pValue = pVM->cpum.s.Guest.dr1;
            break;
        case USE_REG_DR2:
            *pValue = pVM->cpum.s.Guest.dr2;
            break;
        case USE_REG_DR3:
            *pValue = pVM->cpum.s.Guest.dr3;
            break;
        case USE_REG_DR4: /* aliases DR6 */
        case USE_REG_DR6:
            *pValue = pVM->cpum.s.Guest.dr6;
            break;
        case USE_REG_DR5: /* aliases DR7 */
        case USE_REG_DR7:
            *pValue = pVM->cpum.s.Guest.dr7;
            break;

        default:
            return VERR_INVALID_PARAMETER;
    }
    return VINF_SUCCESS;
}

/** Gets the guest EFER MSR value. */
CPUMDECL(uint64_t) CPUMGetGuestEFER(PVM pVM)
{
    return pVM->cpum.s.Guest.msrEFER;
}
866
867/**
868 * Gets a CpuId leaf.
869 *
870 * @param pVM The VM handle.
871 * @param iLeaf The CPUID leaf to get.
872 * @param pEax Where to store the EAX value.
873 * @param pEbx Where to store the EBX value.
874 * @param pEcx Where to store the ECX value.
875 * @param pEdx Where to store the EDX value.
876 */
877CPUMDECL(void) CPUMGetGuestCpuId(PVM pVM, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
878{
879 PCCPUMCPUID pCpuId;
880 if (iLeaf < ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
881 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
882 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
883 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
884 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
885 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
886 else
887 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
888
889 *pEax = pCpuId->eax;
890 *pEbx = pCpuId->ebx;
891 *pEcx = pCpuId->ecx;
892 *pEdx = pCpuId->edx;
893 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
894}
895
896/**
897 * Gets a pointer to the array of standard CPUID leafs.
898 *
899 * CPUMGetGuestCpuIdStdMax() give the size of the array.
900 *
901 * @returns Pointer to the standard CPUID leafs (read-only).
902 * @param pVM The VM handle.
903 * @remark Intended for PATM.
904 */
905CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdStdGCPtr(PVM pVM)
906{
907 return RCPTRTYPE(PCCPUMCPUID)VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
908}
909
910/**
911 * Gets a pointer to the array of extended CPUID leafs.
912 *
913 * CPUMGetGuestCpuIdExtMax() give the size of the array.
914 *
915 * @returns Pointer to the extended CPUID leafs (read-only).
916 * @param pVM The VM handle.
917 * @remark Intended for PATM.
918 */
919CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdExtGCPtr(PVM pVM)
920{
921 return RCPTRTYPE(PCCPUMCPUID)VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
922}
923
924/**
925 * Gets a pointer to the array of centaur CPUID leafs.
926 *
927 * CPUMGetGuestCpuIdCentaurMax() give the size of the array.
928 *
929 * @returns Pointer to the centaur CPUID leafs (read-only).
930 * @param pVM The VM handle.
931 * @remark Intended for PATM.
932 */
933CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdCentaurGCPtr(PVM pVM)
934{
935 return RCPTRTYPE(PCCPUMCPUID)VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
936}
937
938/**
939 * Gets a pointer to the default CPUID leaf.
940 *
941 * @returns Pointer to the default CPUID leaf (read-only).
942 * @param pVM The VM handle.
943 * @remark Intended for PATM.
944 */
945CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdDefGCPtr(PVM pVM)
946{
947 return RCPTRTYPE(PCCPUMCPUID)VM_GUEST_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
948}
949
/**
 * Gets a number of standard CPUID leafs.
 *
 * @returns Number of leafs.
 * @param pVM The VM handle.
 * @remark Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
}

/**
 * Gets a number of extended CPUID leafs.
 *
 * @returns Number of leafs.
 * @param pVM The VM handle.
 * @remark Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
}

/**
 * Gets a number of centaur CPUID leafs.
 *
 * @returns Number of leafs.
 * @param pVM The VM handle.
 * @remark Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
}
984}
985
986/**
987 * Sets a CPUID feature bit.
988 *
989 * @param pVM The VM Handle.
990 * @param enmFeature The feature to set.
991 */
992CPUMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
993{
994 switch (enmFeature)
995 {
996 /*
997 * Set the APIC bit in both feature masks.
998 */
999 case CPUMCPUIDFEATURE_APIC:
1000 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1001 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1002 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1003 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1004 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1005 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1006 break;
1007
1008 /*
1009 * Set the sysenter/sysexit bit in the standard feature mask.
1010 * Assumes the caller knows what it's doing! (host must support these)
1011 */
1012 case CPUMCPUIDFEATURE_SEP:
1013 {
1014 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1015 {
1016 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1017 return;
1018 }
1019
1020 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1021 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1022 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1023 break;
1024 }
1025
1026 /*
1027 * Set the syscall/sysret bit in the extended feature mask.
1028 * Assumes the caller knows what it's doing! (host must support these)
1029 */
1030 case CPUMCPUIDFEATURE_SYSCALL:
1031 {
1032 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1033 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1034 {
1035 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1036 return;
1037 }
1038 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1039 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1040 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1041 break;
1042 }
1043
1044 /*
1045 * Set the PAE bit in both feature masks.
1046 * Assumes the caller knows what it's doing! (host must support these)
1047 */
1048 case CPUMCPUIDFEATURE_PAE:
1049 {
1050 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1051 {
1052 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1053 return;
1054 }
1055
1056 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1057 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1058 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1059 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1060 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1061 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1062 break;
1063 }
1064
1065 /*
1066 * Set the LONG MODE bit in the extended feature mask.
1067 * Assumes the caller knows what it's doing! (host must support these)
1068 */
1069 case CPUMCPUIDFEATURE_LONG_MODE:
1070 {
1071 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1072 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1073 {
1074 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1075 return;
1076 }
1077
1078 /* Valid for both Intel and AMD. */
1079 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1080 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1081 break;
1082 }
1083
1084 /*
1085 * Set the NXE bit in the extended feature mask.
1086 * Assumes the caller knows what it's doing! (host must support these)
1087 */
1088 case CPUMCPUIDFEATURE_NXE:
1089 {
1090 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1091 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1092 {
1093 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1094 return;
1095 }
1096
1097 /* Valid for both Intel and AMD. */
1098 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1099 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1100 break;
1101 }
1102
1103 case CPUMCPUIDFEATURE_LAHF:
1104 {
1105 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1106 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1107 {
1108 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1109 return;
1110 }
1111
1112 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1113 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1114 break;
1115 }
1116
1117 default:
1118 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1119 break;
1120 }
1121 pVM->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1122}
1123
/**
 * Queries a CPUID feature bit.
 *
 * Note: only CPUMCPUIDFEATURE_PAE is currently implemented; any other
 * feature triggers an assertion and returns false.
 *
 * @returns boolean for feature presence
 * @param pVM The VM Handle.
 * @param enmFeature The feature to query.
 */
CPUMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        case CPUMCPUIDFEATURE_PAE:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    return false;
}
1148
1149/**
1150 * Clears a CPUID feature bit.
1151 *
1152 * @param pVM The VM Handle.
1153 * @param enmFeature The feature to clear.
1154 */
1155CPUMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1156{
1157 switch (enmFeature)
1158 {
1159 /*
1160 * Set the APIC bit in both feature masks.
1161 */
1162 case CPUMCPUIDFEATURE_APIC:
1163 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1164 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1165 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1166 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1167 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1168 Log(("CPUMSetGuestCpuIdFeature: Disabled APIC\n"));
1169 break;
1170
1171 case CPUMCPUIDFEATURE_PAE:
1172 {
1173 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1174 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1175 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1176 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1177 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1178 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1179 break;
1180 }
1181
1182 default:
1183 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1184 break;
1185 }
1186 pVM->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1187}
1188
1189/**
1190 * Gets the CPU vendor
1191 *
1192 * @returns CPU vendor
1193 * @param pVM The VM handle.
1194 */
1195CPUMDECL(CPUMCPUVENDOR) CPUMGetCPUVendor(PVM pVM)
1196{
1197 return pVM->cpum.s.enmCPUVendor;
1198}
1199
1200
1201CPUMDECL(int) CPUMSetGuestDR0(PVM pVM, RTGCUINTREG uDr0)
1202{
1203 pVM->cpum.s.Guest.dr0 = uDr0;
1204 return CPUMRecalcHyperDRx(pVM);
1205}
1206
1207CPUMDECL(int) CPUMSetGuestDR1(PVM pVM, RTGCUINTREG uDr1)
1208{
1209 pVM->cpum.s.Guest.dr1 = uDr1;
1210 return CPUMRecalcHyperDRx(pVM);
1211}
1212
1213CPUMDECL(int) CPUMSetGuestDR2(PVM pVM, RTGCUINTREG uDr2)
1214{
1215 pVM->cpum.s.Guest.dr2 = uDr2;
1216 return CPUMRecalcHyperDRx(pVM);
1217}
1218
1219CPUMDECL(int) CPUMSetGuestDR3(PVM pVM, RTGCUINTREG uDr3)
1220{
1221 pVM->cpum.s.Guest.dr3 = uDr3;
1222 return CPUMRecalcHyperDRx(pVM);
1223}
1224
1225CPUMDECL(int) CPUMSetGuestDR6(PVM pVM, RTGCUINTREG uDr6)
1226{
1227 pVM->cpum.s.Guest.dr6 = uDr6;
1228 return CPUMRecalcHyperDRx(pVM);
1229}
1230
1231CPUMDECL(int) CPUMSetGuestDR7(PVM pVM, RTGCUINTREG uDr7)
1232{
1233 pVM->cpum.s.Guest.dr7 = uDr7;
1234 return CPUMRecalcHyperDRx(pVM);
1235}
1236
1237/** @todo drx should be an array */
1238CPUMDECL(int) CPUMSetGuestDRx(PVM pVM, uint32_t iReg, uint32_t Value)
1239{
1240 switch (iReg)
1241 {
1242 case USE_REG_DR0:
1243 pVM->cpum.s.Guest.dr0 = Value;
1244 break;
1245 case USE_REG_DR1:
1246 pVM->cpum.s.Guest.dr1 = Value;
1247 break;
1248 case USE_REG_DR2:
1249 pVM->cpum.s.Guest.dr2 = Value;
1250 break;
1251 case USE_REG_DR3:
1252 pVM->cpum.s.Guest.dr3 = Value;
1253 break;
1254 case USE_REG_DR4:
1255 case USE_REG_DR6:
1256 pVM->cpum.s.Guest.dr6 = Value;
1257 break;
1258 case USE_REG_DR5:
1259 case USE_REG_DR7:
1260 pVM->cpum.s.Guest.dr7 = Value;
1261 break;
1262
1263 default:
1264 return VERR_INVALID_PARAMETER;
1265 }
1266 return CPUMRecalcHyperDRx(pVM);
1267}
1268
1269
1270/**
1271 * Recalculates the hypvervisor DRx register values based on
1272 * current guest registers and DBGF breakpoints.
1273 *
1274 * This is called whenever a guest DRx register is modified and when DBGF
1275 * sets a hardware breakpoint. In guest context this function will reload
1276 * any (hyper) DRx registers which comes out with a different value.
1277 *
1278 * @returns VINF_SUCCESS.
1279 * @param pVM The VM handle.
1280 */
1281CPUMDECL(int) CPUMRecalcHyperDRx(PVM pVM)
1282{
1283 /*
1284 * Compare the DR7s first.
1285 *
1286 * We only care about the enabled flags. The GE and LE flags are always
1287 * set and we don't care if the guest doesn't set them. GD is virtualized
1288 * when we dispatch #DB, we never enable it.
1289 */
1290 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1291#ifdef CPUM_VIRTUALIZE_DRX
1292 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVM);
1293#else
1294 const RTGCUINTREG uGstDr7 = 0;
1295#endif
1296 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1297 {
1298 /*
1299 * Ok, something is enabled. Recalc each of the breakpoints.
1300 * Straight forward code, not optimized/minimized in any way.
1301 */
1302 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1303
1304 /* bp 0 */
1305 RTGCUINTREG uNewDr0;
1306 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1307 {
1308 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1309 uNewDr0 = DBGFBpGetDR0(pVM);
1310 }
1311 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1312 {
1313 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1314 uNewDr0 = CPUMGetGuestDR0(pVM);
1315 }
1316 else
1317 uNewDr0 = pVM->cpum.s.Hyper.dr0;
1318
1319 /* bp 1 */
1320 RTGCUINTREG uNewDr1;
1321 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1322 {
1323 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1324 uNewDr1 = DBGFBpGetDR1(pVM);
1325 }
1326 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1327 {
1328 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1329 uNewDr1 = CPUMGetGuestDR1(pVM);
1330 }
1331 else
1332 uNewDr1 = pVM->cpum.s.Hyper.dr1;
1333
1334 /* bp 2 */
1335 RTGCUINTREG uNewDr2;
1336 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1337 {
1338 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1339 uNewDr2 = DBGFBpGetDR2(pVM);
1340 }
1341 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1342 {
1343 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1344 uNewDr2 = CPUMGetGuestDR2(pVM);
1345 }
1346 else
1347 uNewDr2 = pVM->cpum.s.Hyper.dr2;
1348
1349 /* bp 3 */
1350 RTGCUINTREG uNewDr3;
1351 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1352 {
1353 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1354 uNewDr3 = DBGFBpGetDR3(pVM);
1355 }
1356 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1357 {
1358 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1359 uNewDr3 = CPUMGetGuestDR3(pVM);
1360 }
1361 else
1362 uNewDr3 = pVM->cpum.s.Hyper.dr3;
1363
1364 /*
1365 * Apply the updates.
1366 */
1367#ifdef IN_GC
1368 if (!(pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
1369 {
1370 /** @todo save host DBx registers. */
1371 }
1372#endif
1373 pVM->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
1374 if (uNewDr3 != pVM->cpum.s.Hyper.dr3)
1375 CPUMSetHyperDR3(pVM, uNewDr3);
1376 if (uNewDr2 != pVM->cpum.s.Hyper.dr2)
1377 CPUMSetHyperDR2(pVM, uNewDr2);
1378 if (uNewDr1 != pVM->cpum.s.Hyper.dr1)
1379 CPUMSetHyperDR1(pVM, uNewDr1);
1380 if (uNewDr0 != pVM->cpum.s.Hyper.dr0)
1381 CPUMSetHyperDR0(pVM, uNewDr0);
1382 if (uNewDr7 != pVM->cpum.s.Hyper.dr7)
1383 CPUMSetHyperDR7(pVM, uNewDr7);
1384 }
1385 else
1386 {
1387#ifdef IN_GC
1388 if (pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
1389 {
1390 /** @todo restore host DBx registers. */
1391 }
1392#endif
1393 pVM->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1394 }
1395 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1396 pVM->cpum.s.fUseFlags, pVM->cpum.s.Hyper.dr0, pVM->cpum.s.Hyper.dr1,
1397 pVM->cpum.s.Hyper.dr2, pVM->cpum.s.Hyper.dr3, pVM->cpum.s.Hyper.dr6,
1398 pVM->cpum.s.Hyper.dr7));
1399
1400 return VINF_SUCCESS;
1401}
1402
1403#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
1404
1405/**
1406 * Transforms the guest CPU state to raw-ring mode.
1407 *
1408 * This function will change the any of the cs and ss register with DPL=0 to DPL=1.
1409 *
1410 * @returns VBox status. (recompiler failure)
1411 * @param pVM VM handle.
1412 * @param pCtxCore The context core (for trap usage).
1413 * @see @ref pg_raw
1414 */
1415CPUMDECL(int) CPUMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
1416{
1417 Assert(!pVM->cpum.s.fRawEntered);
1418 if (!pCtxCore)
1419 pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Guest);
1420
1421 /*
1422 * Are we in Ring-0?
1423 */
1424 if ( pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
1425 && !pCtxCore->eflags.Bits.u1VM)
1426 {
1427 /*
1428 * Enter execution mode.
1429 */
1430 PATMRawEnter(pVM, pCtxCore);
1431
1432 /*
1433 * Set CPL to Ring-1.
1434 */
1435 pCtxCore->ss |= 1;
1436 if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
1437 pCtxCore->cs |= 1;
1438 }
1439 else
1440 {
1441 AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
1442 ("ring-1 code not supported\n"));
1443 /*
1444 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
1445 */
1446 PATMRawEnter(pVM, pCtxCore);
1447 }
1448
1449 /*
1450 * Assert sanity.
1451 */
1452 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
1453 AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
1454 || pCtxCore->eflags.Bits.u1VM,
1455 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1456 Assert((pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
1457 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
1458
1459 pVM->cpum.s.fRawEntered = true;
1460 return VINF_SUCCESS;
1461}
1462
1463
1464/**
1465 * Transforms the guest CPU state from raw-ring mode to correct values.
1466 *
1467 * This function will change any selector registers with DPL=1 to DPL=0.
1468 *
1469 * @returns Adjusted rc.
1470 * @param pVM VM handle.
1471 * @param rc Raw mode return code
1472 * @param pCtxCore The context core (for trap usage).
1473 * @see @ref pg_raw
1474 */
1475CPUMDECL(int) CPUMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rc)
1476{
1477 /*
1478 * Don't leave if we've already left (in GC).
1479 */
1480 Assert(pVM->cpum.s.fRawEntered);
1481 if (!pVM->cpum.s.fRawEntered)
1482 return rc;
1483 pVM->cpum.s.fRawEntered = false;
1484
1485 PCPUMCTX pCtx = &pVM->cpum.s.Guest;
1486 if (!pCtxCore)
1487 pCtxCore = CPUMCTX2CORE(pCtx);
1488 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
1489 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
1490 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1491
1492 /*
1493 * Are we executing in raw ring-1?
1494 */
1495 if ( (pCtxCore->ss & X86_SEL_RPL) == 1
1496 && !pCtxCore->eflags.Bits.u1VM)
1497 {
1498 /*
1499 * Leave execution mode.
1500 */
1501 PATMRawLeave(pVM, pCtxCore, rc);
1502 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
1503 /** @todo See what happens if we remove this. */
1504 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1505 pCtxCore->ds &= ~X86_SEL_RPL;
1506 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1507 pCtxCore->es &= ~X86_SEL_RPL;
1508 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1509 pCtxCore->fs &= ~X86_SEL_RPL;
1510 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1511 pCtxCore->gs &= ~X86_SEL_RPL;
1512
1513 /*
1514 * Ring-1 selector => Ring-0.
1515 */
1516 pCtxCore->ss &= ~X86_SEL_RPL;
1517 if ((pCtxCore->cs & X86_SEL_RPL) == 1)
1518 pCtxCore->cs &= ~X86_SEL_RPL;
1519 }
1520 else
1521 {
1522 /*
1523 * PATM is taking care of the IOPL and IF flags for us.
1524 */
1525 PATMRawLeave(pVM, pCtxCore, rc);
1526 if (!pCtxCore->eflags.Bits.u1VM)
1527 {
1528 /** @todo See what happens if we remove this. */
1529 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1530 pCtxCore->ds &= ~X86_SEL_RPL;
1531 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1532 pCtxCore->es &= ~X86_SEL_RPL;
1533 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1534 pCtxCore->fs &= ~X86_SEL_RPL;
1535 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1536 pCtxCore->gs &= ~X86_SEL_RPL;
1537 }
1538 }
1539
1540 return rc;
1541}
1542
1543/**
1544 * Updates the EFLAGS while we're in raw-mode.
1545 *
1546 * @param pVM The VM handle.
1547 * @param pCtxCore The context core.
1548 * @param eflags The new EFLAGS value.
1549 */
1550CPUMDECL(void) CPUMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t eflags)
1551{
1552 if (!pVM->cpum.s.fRawEntered)
1553 {
1554 pCtxCore->eflags.u32 = eflags;
1555 return;
1556 }
1557 PATMRawSetEFlags(pVM, pCtxCore, eflags);
1558}
1559
1560#endif /* !IN_RING0 */
1561
1562/**
1563 * Gets the EFLAGS while we're in raw-mode.
1564 *
1565 * @returns The eflags.
1566 * @param pVM The VM handle.
1567 * @param pCtxCore The context core.
1568 */
1569CPUMDECL(uint32_t) CPUMRawGetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore)
1570{
1571#ifdef IN_RING0
1572 return pCtxCore->eflags.u32;
1573#else
1574 if (!pVM->cpum.s.fRawEntered)
1575 return pCtxCore->eflags.u32;
1576 return PATMRawGetEFlags(pVM, pCtxCore);
1577#endif
1578}
1579
1580
1581
1582
1583/**
1584 * Gets and resets the changed flags (CPUM_CHANGED_*).
1585 * Only REM should call this function.
1586 *
1587 * @returns The changed flags.
1588 * @param pVM The VM handle.
1589 */
1590CPUMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVM pVM)
1591{
1592 unsigned fFlags = pVM->cpum.s.fChanged;
1593 pVM->cpum.s.fChanged = 0;
1594 /** @todo change the switcher to use the fChanged flags. */
1595 if (pVM->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
1596 {
1597 fFlags |= CPUM_CHANGED_FPU_REM;
1598 pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
1599 }
1600 return fFlags;
1601}
1602
1603/**
1604 * Sets the specified changed flags (CPUM_CHANGED_*).
1605 *
1606 * @param pVM The VM handle.
1607 */
1608CPUMDECL(void) CPUMSetChangedFlags(PVM pVM, uint32_t fChangedFlags)
1609{
1610 pVM->cpum.s.fChanged |= fChangedFlags;
1611}
1612
1613/**
1614 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
1615 * @returns true if supported.
1616 * @returns false if not supported.
1617 * @param pVM The VM handle.
1618 */
1619CPUMDECL(bool) CPUMSupportsFXSR(PVM pVM)
1620{
1621 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
1622}
1623
1624
1625/**
1626 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1627 * @returns true if used.
1628 * @returns false if not used.
1629 * @param pVM The VM handle.
1630 */
1631CPUMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1632{
1633 return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSENTER) != 0;
1634}
1635
1636
1637/**
1638 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1639 * @returns true if used.
1640 * @returns false if not used.
1641 * @param pVM The VM handle.
1642 */
1643CPUMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1644{
1645 return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSCALL) != 0;
1646}
1647
1648
1649#ifndef IN_RING3
1650/**
1651 * Lazily sync in the FPU/XMM state
1652 *
1653 * @returns VBox status code.
1654 * @param pVM VM handle.
1655 */
1656CPUMDECL(int) CPUMHandleLazyFPU(PVM pVM)
1657{
1658 return CPUMHandleLazyFPUAsm(&pVM->cpum.s);
1659}
1660
1661
1662/**
1663 * Restore host FPU/XMM state
1664 *
1665 * @returns VBox status code.
1666 * @param pVM VM handle.
1667 */
1668CPUMDECL(int) CPUMRestoreHostFPUState(PVM pVM)
1669{
1670 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
1671 return CPUMRestoreHostFPUStateAsm(&pVM->cpum.s);
1672}
1673#endif /* !IN_RING3 */
1674
1675
1676/**
1677 * Checks if we activated the FPU/XMM state of the guest OS
1678 * @returns true if we did.
1679 * @returns false if not.
1680 * @param pVM The VM handle.
1681 */
1682CPUMDECL(bool) CPUMIsGuestFPUStateActive(PVM pVM)
1683{
1684 return (pVM->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
1685}
1686
1687
1688/**
1689 * Deactivate the FPU/XMM state of the guest OS
1690 * @param pVM The VM handle.
1691 */
1692CPUMDECL(void) CPUMDeactivateGuestFPUState(PVM pVM)
1693{
1694 pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
1695}
1696
1697
1698/**
1699 * Checks if the hidden selector registers are valid
1700 * @returns true if they are.
1701 * @returns false if not.
1702 * @param pVM The VM handle.
1703 */
1704CPUMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
1705{
1706 return !!pVM->cpum.s.fValidHiddenSelRegs; /** @todo change fValidHiddenSelRegs to bool! */
1707}
1708
1709
1710/**
1711 * Checks if the hidden selector registers are valid
1712 * @param pVM The VM handle.
1713 * @param fValid Valid or not
1714 */
1715CPUMDECL(void) CPUMSetHiddenSelRegsValid(PVM pVM, bool fValid)
1716{
1717 pVM->cpum.s.fValidHiddenSelRegs = fValid;
1718}
1719
1720
1721/**
1722 * Get the current privilege level of the guest.
1723 *
1724 * @returns cpl
1725 * @param pVM VM Handle.
1726 * @param pRegFrame Trap register frame.
1727 */
1728CPUMDECL(uint32_t) CPUMGetGuestCPL(PVM pVM, PCPUMCTXCORE pCtxCore)
1729{
1730 uint32_t cpl;
1731
1732 if (CPUMAreHiddenSelRegsValid(pVM))
1733 cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
1734 else if (RT_LIKELY(pVM->cpum.s.Guest.cr0 & X86_CR0_PE))
1735 {
1736 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
1737 {
1738 cpl = (pCtxCore->ss & X86_SEL_RPL);
1739#ifndef IN_RING0
1740 if (cpl == 1)
1741 cpl = 0;
1742#endif
1743 }
1744 else
1745 cpl = 3;
1746 }
1747 else
1748 cpl = 0; /* real mode; cpl is zero */
1749
1750 return cpl;
1751}
1752
1753
1754/**
1755 * Gets the current guest CPU mode.
1756 *
1757 * If paging mode is what you need, check out PGMGetGuestMode().
1758 *
1759 * @returns The CPU mode.
1760 * @param pVM The VM handle.
1761 */
1762CPUMDECL(CPUMMODE) CPUMGetGuestMode(PVM pVM)
1763{
1764 CPUMMODE enmMode;
1765 if (!(pVM->cpum.s.Guest.cr0 & X86_CR0_PE))
1766 enmMode = CPUMMODE_REAL;
1767 else //GUEST64 if (!(pVM->cpum.s.Guest.efer & MSR_K6_EFER_LMA)
1768 enmMode = CPUMMODE_PROTECTED;
1769//GUEST64 else
1770//GUEST64 enmMode = CPUMMODE_LONG;
1771
1772 return enmMode;
1773}
1774
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette