VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 9462

Last change on this file since 9462 was 9430, checked in by vboxsync, 17 years ago

Made the base of GDTR and IDTR 64 bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 46.8 KB
/* $Id: CPUMAllRegs.cpp 9430 2008-06-05 15:28:07Z vboxsync $ */
/** @file
 * CPUM - CPU Monitor(/Manager) - Gets and Sets.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/cpum.h>
#include <VBox/patm.h>
#include <VBox/dbgf.h>
#include <VBox/mm.h>
#include "CPUMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/dis.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>



/** Disable stack frame pointer generation here. */
#if defined(_MSC_VER) && !defined(DEBUG)
# pragma optimize("y", off)
#endif


/**
 * Sets or resets an alternative hypervisor context core.
 *
 * This is called when we get a hypervisor trap, to switch the context
 * core to the trap frame on the stack. It is called again to reset
 * back to the default context core when resuming hypervisor execution.
 *
 * @param   pVM         The VM handle.
 * @param   pCtxCore    Pointer to the alternative context core or NULL
 *                      to go back to the default context core.
 */
CPUMDECL(void) CPUMHyperSetCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    LogFlow(("CPUMHyperSetCtxCore: %p/%p/%p -> %p\n",
             pVM->cpum.s.pHyperCoreR3, pVM->cpum.s.pHyperCoreR0, pVM->cpum.s.pHyperCoreGC, pCtxCore));
    if (!pCtxCore)
    {
        pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
        pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreGC = (RCPTRTYPE(PCPUMCTXCORE))VM_GUEST_ADDR(pVM, pCtxCore);
    }
    else
    {
        pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
        pVM->cpum.s.pHyperCoreGC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToGC(pVM, pCtxCore);
    }
}

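/*
 * A minimal usage sketch (not part of the original file): how a hypervisor
 * trap handler might temporarily switch the context core to a trap frame
 * and restore the default one afterwards. pTrapFrame is hypothetical.
 *
 * @code
 *     PCPUMCTXCORE pTrapFrame = ...;         // frame captured on the stack
 *     CPUMHyperSetCtxCore(pVM, pTrapFrame);  // register access now hits the frame
 *     // ... use the CPUMGetHyperXxx / CPUMSetHyperXxx accessors ...
 *     CPUMHyperSetCtxCore(pVM, NULL);        // back to the default context core
 * @endcode
 */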

/**
 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
 * This is only for reading in order to save a few calls.
 *
 * @param   pVM     Handle to the virtual machine.
 */
CPUMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore);
}


/**
 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
 *
 * @returns VBox status code.
 * @param   pVM     Handle to the virtual machine.
 * @param   ppCtx   Receives the hyper CPUMCTX pointer when successful.
 *
 * @deprecated This does *not* (and never has) give the right picture of the
 *             hypervisor register state. With CPUMHyperSetCtxCore() this is
 *             getting much worse. So, use the individual functions for getting
 *             and esp. setting the hypervisor registers.
 */
CPUMDECL(int) CPUMQueryHyperCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
{
    *ppCtx = &pVM->cpum.s.Hyper;
    return VINF_SUCCESS;
}

CPUMDECL(void) CPUMSetHyperGDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Hyper.gdtr.cbGdt = limit;
    pVM->cpum.s.Hyper.gdtr.pGdt = addr;
    pVM->cpum.s.Hyper.gdtrPadding = 0;
}

CPUMDECL(void) CPUMSetHyperIDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Hyper.idtr.cbIdt = limit;
    pVM->cpum.s.Hyper.idtr.pIdt = addr;
    pVM->cpum.s.Hyper.idtrPadding = 0;
}

CPUMDECL(void) CPUMSetHyperCR3(PVM pVM, uint32_t cr3)
{
    pVM->cpum.s.Hyper.cr3 = cr3;
}

CPUMDECL(void) CPUMSetHyperCS(PVM pVM, RTSEL SelCS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->cs = SelCS;
}

CPUMDECL(void) CPUMSetHyperDS(PVM pVM, RTSEL SelDS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->ds = SelDS;
}

CPUMDECL(void) CPUMSetHyperES(PVM pVM, RTSEL SelES)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->es = SelES;
}

CPUMDECL(void) CPUMSetHyperFS(PVM pVM, RTSEL SelFS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->fs = SelFS;
}

CPUMDECL(void) CPUMSetHyperGS(PVM pVM, RTSEL SelGS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->gs = SelGS;
}

CPUMDECL(void) CPUMSetHyperSS(PVM pVM, RTSEL SelSS)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->ss = SelSS;
}

CPUMDECL(void) CPUMSetHyperESP(PVM pVM, uint32_t u32ESP)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->esp = u32ESP;
}

CPUMDECL(int) CPUMSetHyperEFlags(PVM pVM, uint32_t Efl)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->eflags.u32 = Efl;
    return VINF_SUCCESS;
}

CPUMDECL(void) CPUMSetHyperEIP(PVM pVM, uint32_t u32EIP)
{
    pVM->cpum.s.CTXALLSUFF(pHyperCore)->eip = u32EIP;
}

CPUMDECL(void) CPUMSetHyperTR(PVM pVM, RTSEL SelTR)
{
    pVM->cpum.s.Hyper.tr = SelTR;
}

CPUMDECL(void) CPUMSetHyperLDTR(PVM pVM, RTSEL SelLDTR)
{
    pVM->cpum.s.Hyper.ldtr = SelLDTR;
}

CPUMDECL(void) CPUMSetHyperDR0(PVM pVM, RTGCUINTREG uDr0)
{
    pVM->cpum.s.Hyper.dr0 = uDr0;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR1(PVM pVM, RTGCUINTREG uDr1)
{
    pVM->cpum.s.Hyper.dr1 = uDr1;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR2(PVM pVM, RTGCUINTREG uDr2)
{
    pVM->cpum.s.Hyper.dr2 = uDr2;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR3(PVM pVM, RTGCUINTREG uDr3)
{
    pVM->cpum.s.Hyper.dr3 = uDr3;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR6(PVM pVM, RTGCUINTREG uDr6)
{
    pVM->cpum.s.Hyper.dr6 = uDr6;
    /** @todo in GC we must load it! */
}

CPUMDECL(void) CPUMSetHyperDR7(PVM pVM, RTGCUINTREG uDr7)
{
    pVM->cpum.s.Hyper.dr7 = uDr7;
    /** @todo in GC we must load it! */
}


CPUMDECL(RTSEL) CPUMGetHyperCS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->cs;
}

CPUMDECL(RTSEL) CPUMGetHyperDS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ds;
}

CPUMDECL(RTSEL) CPUMGetHyperES(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->es;
}

CPUMDECL(RTSEL) CPUMGetHyperFS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->fs;
}

CPUMDECL(RTSEL) CPUMGetHyperGS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->gs;
}

CPUMDECL(RTSEL) CPUMGetHyperSS(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ss;
}

#if 0 /* these are not correct. */

CPUMDECL(uint32_t) CPUMGetHyperCR0(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr0;
}

CPUMDECL(uint32_t) CPUMGetHyperCR2(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr2;
}

CPUMDECL(uint32_t) CPUMGetHyperCR3(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr3;
}

CPUMDECL(uint32_t) CPUMGetHyperCR4(PVM pVM)
{
    return pVM->cpum.s.Hyper.cr4;
}

#endif /* not correct */

CPUMDECL(uint32_t) CPUMGetHyperEAX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eax;
}

CPUMDECL(uint32_t) CPUMGetHyperEBX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ebx;
}

CPUMDECL(uint32_t) CPUMGetHyperECX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ecx;
}

CPUMDECL(uint32_t) CPUMGetHyperEDX(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->edx;
}

CPUMDECL(uint32_t) CPUMGetHyperESI(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->esi;
}

CPUMDECL(uint32_t) CPUMGetHyperEDI(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->edi;
}

CPUMDECL(uint32_t) CPUMGetHyperEBP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->ebp;
}

CPUMDECL(uint32_t) CPUMGetHyperESP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->esp;
}

CPUMDECL(uint32_t) CPUMGetHyperEFlags(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eflags.u32;
}

CPUMDECL(uint32_t) CPUMGetHyperEIP(PVM pVM)
{
    return pVM->cpum.s.CTXALLSUFF(pHyperCore)->eip;
}

CPUMDECL(uint32_t) CPUMGetHyperIDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Hyper.idtr.cbIdt;
    return pVM->cpum.s.Hyper.idtr.pIdt;
}

CPUMDECL(uint32_t) CPUMGetHyperGDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Hyper.gdtr.cbGdt;
    return pVM->cpum.s.Hyper.gdtr.pGdt;
}

CPUMDECL(RTSEL) CPUMGetHyperLDTR(PVM pVM)
{
    return pVM->cpum.s.Hyper.ldtr;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr0;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr1;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr2;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr3;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr6;
}

CPUMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVM pVM)
{
    return pVM->cpum.s.Hyper.dr7;
}


/**
 * Gets the pointer to the internal CPUMCTXCORE structure.
 * This is only for reading in order to save a few calls.
 *
 * @param   pVM     Handle to the virtual machine.
 */
CPUMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVM pVM)
{
    return CPUMCTX2CORE(&pVM->cpum.s.Guest);
}


/**
 * Sets the guest context core registers.
 *
 * @param   pVM         Handle to the virtual machine.
 * @param   pCtxCore    The new context core values.
 */
CPUMDECL(void) CPUMSetGuestCtxCore(PVM pVM, PCCPUMCTXCORE pCtxCore)
{
    /** @todo #1410 requires selectors to be checked. */

    PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVM->cpum.s.Guest);
    *pCtxCoreDst = *pCtxCore;
}


/**
 * Queries the pointer to the internal CPUMCTX structure.
 *
 * @returns VBox status code.
 * @param   pVM     Handle to the virtual machine.
 * @param   ppCtx   Receives the CPUMCTX pointer when successful.
 */
CPUMDECL(int) CPUMQueryGuestCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
{
    *ppCtx = &pVM->cpum.s.Guest;
    return VINF_SUCCESS;
}


CPUMDECL(int) CPUMSetGuestGDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Guest.gdtr.cbGdt = limit;
    pVM->cpum.s.Guest.gdtr.pGdt = addr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestIDTR(PVM pVM, uint32_t addr, uint16_t limit)
{
    pVM->cpum.s.Guest.idtr.cbIdt = limit;
    pVM->cpum.s.Guest.idtr.pIdt = addr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestTR(PVM pVM, uint16_t tr)
{
    pVM->cpum.s.Guest.tr = tr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_TR;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestLDTR(PVM pVM, uint16_t ldtr)
{
    pVM->cpum.s.Guest.ldtr = ldtr;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
    return VINF_SUCCESS;
}


/**
 * Set the guest CR0.
 *
 * When called in GC, the hyper CR0 may be updated if that is
 * required. The caller only has to take special action if AM,
 * WP, PG or PE changes.
 *
 * @returns VINF_SUCCESS (consider it void).
 * @param   pVM     Pointer to the shared VM structure.
 * @param   cr0     The new CR0 value.
 */
CPUMDECL(int) CPUMSetGuestCR0(PVM pVM, uint64_t cr0)
{
#ifdef IN_GC
    /*
     * Check if we need to change hypervisor CR0 because
     * of math stuff.
     */
    if (    (cr0                   & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
        !=  (pVM->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
    {
        if (!(pVM->cpum.s.fUseFlags & CPUM_USED_FPU))
        {
            /*
             * We haven't saved the host FPU state yet, so TS and MP are both set
             * and EM should be reflecting the guest EM (it always does this).
             */
            if ((cr0 & X86_CR0_EM) != (pVM->cpum.s.Guest.cr0 & X86_CR0_EM))
            {
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVM->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
                HyperCR0 &= ~X86_CR0_EM;
                HyperCR0 |= cr0 & X86_CR0_EM;
                Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
                ASMSetCR0(HyperCR0);
            }
#ifdef VBOX_STRICT
            else
            {
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVM->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
            }
#endif
        }
        else
        {
            /*
             * Already saved the state, so we're just mirroring
             * the guest flags.
             */
            uint32_t HyperCR0 = ASMGetCR0();
            AssertMsg(    (HyperCR0              & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
                      ==  (pVM->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
                      ("%#x %#x\n", HyperCR0, pVM->cpum.s.Guest.cr0));
            HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
            ASMSetCR0(HyperCR0);
        }
    }
#endif

    /*
     * Check for changes causing TLB flushes (for REM).
     * The caller is responsible for calling PGM when appropriate.
     */
    if (    (cr0                   & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
        !=  (pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
        pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR0;

    pVM->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
    return VINF_SUCCESS;
}

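/*
 * A minimal caller sketch (not part of the original file) honouring the
 * contract above: the setter only records TLB-relevant changes in fChanged,
 * so a caller toggling PG/WP/PE must still notify PGM itself. cr0New is a
 * hypothetical new value.
 *
 * @code
 *     uint64_t cr0Old = CPUMGetGuestCR0(pVM);
 *     CPUMSetGuestCR0(pVM, cr0New);
 *     if ((cr0Old ^ cr0New) & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
 *     {
 *         // paging-relevant bits changed; tell PGM (call site dependent)
 *     }
 * @endcode
 */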
CPUMDECL(int) CPUMSetGuestCR2(PVM pVM, uint64_t cr2)
{
    pVM->cpum.s.Guest.cr2 = cr2;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestCR3(PVM pVM, uint64_t cr3)
{
    pVM->cpum.s.Guest.cr3 = cr3;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR3;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestCR4(PVM pVM, uint64_t cr4)
{
    if (    (cr4                   & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
        !=  (pVM->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
        pVM->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CR4;
    if (!CPUMSupportsFXSR(pVM))
        cr4 &= ~X86_CR4_OSFSXR;
    pVM->cpum.s.Guest.cr4 = cr4;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEFlags(PVM pVM, uint32_t eflags)
{
    pVM->cpum.s.Guest.eflags.u32 = eflags;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEIP(PVM pVM, uint32_t eip)
{
    pVM->cpum.s.Guest.eip = eip;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEAX(PVM pVM, uint32_t eax)
{
    pVM->cpum.s.Guest.eax = eax;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEBX(PVM pVM, uint32_t ebx)
{
    pVM->cpum.s.Guest.ebx = ebx;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestECX(PVM pVM, uint32_t ecx)
{
    pVM->cpum.s.Guest.ecx = ecx;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEDX(PVM pVM, uint32_t edx)
{
    pVM->cpum.s.Guest.edx = edx;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestESP(PVM pVM, uint32_t esp)
{
    pVM->cpum.s.Guest.esp = esp;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEBP(PVM pVM, uint32_t ebp)
{
    pVM->cpum.s.Guest.ebp = ebp;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestESI(PVM pVM, uint32_t esi)
{
    pVM->cpum.s.Guest.esi = esi;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestEDI(PVM pVM, uint32_t edi)
{
    pVM->cpum.s.Guest.edi = edi;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestSS(PVM pVM, uint16_t ss)
{
    pVM->cpum.s.Guest.ss = ss;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestCS(PVM pVM, uint16_t cs)
{
    pVM->cpum.s.Guest.cs = cs;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestDS(PVM pVM, uint16_t ds)
{
    pVM->cpum.s.Guest.ds = ds;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestES(PVM pVM, uint16_t es)
{
    pVM->cpum.s.Guest.es = es;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestFS(PVM pVM, uint16_t fs)
{
    pVM->cpum.s.Guest.fs = fs;
    return VINF_SUCCESS;
}

CPUMDECL(int) CPUMSetGuestGS(PVM pVM, uint16_t gs)
{
    pVM->cpum.s.Guest.gs = gs;
    return VINF_SUCCESS;
}

CPUMDECL(void) CPUMSetGuestEFER(PVM pVM, uint64_t val)
{
    pVM->cpum.s.Guest.msrEFER = val;
}

CPUMDECL(uint32_t) CPUMGetGuestIDTR(PVM pVM, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVM->cpum.s.Guest.idtr.cbIdt;
    return pVM->cpum.s.Guest.idtr.pIdt;
}

CPUMDECL(RTSEL) CPUMGetGuestTR(PVM pVM)
{
    return pVM->cpum.s.Guest.tr;
}

CPUMDECL(RTSEL) CPUMGetGuestCS(PVM pVM)
{
    return pVM->cpum.s.Guest.cs;
}

CPUMDECL(RTSEL) CPUMGetGuestDS(PVM pVM)
{
    return pVM->cpum.s.Guest.ds;
}

CPUMDECL(RTSEL) CPUMGetGuestES(PVM pVM)
{
    return pVM->cpum.s.Guest.es;
}

CPUMDECL(RTSEL) CPUMGetGuestFS(PVM pVM)
{
    return pVM->cpum.s.Guest.fs;
}

CPUMDECL(RTSEL) CPUMGetGuestGS(PVM pVM)
{
    return pVM->cpum.s.Guest.gs;
}

CPUMDECL(RTSEL) CPUMGetGuestSS(PVM pVM)
{
    return pVM->cpum.s.Guest.ss;
}

CPUMDECL(RTSEL) CPUMGetGuestLDTR(PVM pVM)
{
    return pVM->cpum.s.Guest.ldtr;
}

CPUMDECL(uint64_t) CPUMGetGuestCR0(PVM pVM)
{
    return pVM->cpum.s.Guest.cr0;
}

CPUMDECL(uint64_t) CPUMGetGuestCR2(PVM pVM)
{
    return pVM->cpum.s.Guest.cr2;
}

CPUMDECL(uint64_t) CPUMGetGuestCR3(PVM pVM)
{
    return pVM->cpum.s.Guest.cr3;
}

CPUMDECL(uint64_t) CPUMGetGuestCR4(PVM pVM)
{
    return pVM->cpum.s.Guest.cr4;
}

CPUMDECL(void) CPUMGetGuestGDTR(PVM pVM, PVBOXGDTR pGDTR)
{
    *pGDTR = pVM->cpum.s.Guest.gdtr;
}

CPUMDECL(uint32_t) CPUMGetGuestEIP(PVM pVM)
{
    return pVM->cpum.s.Guest.eip;
}

CPUMDECL(uint32_t) CPUMGetGuestEAX(PVM pVM)
{
    return pVM->cpum.s.Guest.eax;
}

CPUMDECL(uint32_t) CPUMGetGuestEBX(PVM pVM)
{
    return pVM->cpum.s.Guest.ebx;
}

CPUMDECL(uint32_t) CPUMGetGuestECX(PVM pVM)
{
    return pVM->cpum.s.Guest.ecx;
}

CPUMDECL(uint32_t) CPUMGetGuestEDX(PVM pVM)
{
    return pVM->cpum.s.Guest.edx;
}

CPUMDECL(uint32_t) CPUMGetGuestESI(PVM pVM)
{
    return pVM->cpum.s.Guest.esi;
}

CPUMDECL(uint32_t) CPUMGetGuestEDI(PVM pVM)
{
    return pVM->cpum.s.Guest.edi;
}

CPUMDECL(uint32_t) CPUMGetGuestESP(PVM pVM)
{
    return pVM->cpum.s.Guest.esp;
}

CPUMDECL(uint32_t) CPUMGetGuestEBP(PVM pVM)
{
    return pVM->cpum.s.Guest.ebp;
}

CPUMDECL(uint32_t) CPUMGetGuestEFlags(PVM pVM)
{
    return pVM->cpum.s.Guest.eflags.u32;
}

CPUMDECL(CPUMSELREGHID *) CPUMGetGuestTRHid(PVM pVM)
{
    return &pVM->cpum.s.Guest.trHid;
}

/** @todo crx should be an array */
CPUMDECL(int) CPUMGetGuestCRx(PVM pVM, unsigned iReg, uint64_t *pValue)
{
    switch (iReg)
    {
        case USE_REG_CR0:
            *pValue = pVM->cpum.s.Guest.cr0;
            break;
        case USE_REG_CR2:
            *pValue = pVM->cpum.s.Guest.cr2;
            break;
        case USE_REG_CR3:
            *pValue = pVM->cpum.s.Guest.cr3;
            break;
        case USE_REG_CR4:
            *pValue = pVM->cpum.s.Guest.cr4;
            break;
        default:
            return VERR_INVALID_PARAMETER;
    }
    return VINF_SUCCESS;
}

CPUMDECL(RTGCUINTREG) CPUMGetGuestDR0(PVM pVM)
{
    return pVM->cpum.s.Guest.dr0;
}

CPUMDECL(RTGCUINTREG) CPUMGetGuestDR1(PVM pVM)
{
    return pVM->cpum.s.Guest.dr1;
}

CPUMDECL(RTGCUINTREG) CPUMGetGuestDR2(PVM pVM)
{
    return pVM->cpum.s.Guest.dr2;
}

CPUMDECL(RTGCUINTREG) CPUMGetGuestDR3(PVM pVM)
{
    return pVM->cpum.s.Guest.dr3;
}

CPUMDECL(RTGCUINTREG) CPUMGetGuestDR6(PVM pVM)
{
    return pVM->cpum.s.Guest.dr6;
}

CPUMDECL(RTGCUINTREG) CPUMGetGuestDR7(PVM pVM)
{
    return pVM->cpum.s.Guest.dr7;
}

/** @todo drx should be an array */
CPUMDECL(int) CPUMGetGuestDRx(PVM pVM, uint32_t iReg, uint32_t *pValue)
{
    switch (iReg)
    {
        case USE_REG_DR0:
            *pValue = pVM->cpum.s.Guest.dr0;
            break;
        case USE_REG_DR1:
            *pValue = pVM->cpum.s.Guest.dr1;
            break;
        case USE_REG_DR2:
            *pValue = pVM->cpum.s.Guest.dr2;
            break;
        case USE_REG_DR3:
            *pValue = pVM->cpum.s.Guest.dr3;
            break;
        case USE_REG_DR4:
        case USE_REG_DR6:
            *pValue = pVM->cpum.s.Guest.dr6;
            break;
        case USE_REG_DR5:
        case USE_REG_DR7:
            *pValue = pVM->cpum.s.Guest.dr7;
            break;

        default:
            return VERR_INVALID_PARAMETER;
    }
    return VINF_SUCCESS;
}

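/*
 * A minimal sketch (not part of the original file) of the index-based
 * accessor above. DR4/DR5 alias DR6/DR7 here, mirroring CPU behaviour
 * when CR4.DE is clear.
 *
 * @code
 *     uint32_t uDr7;
 *     int rc = CPUMGetGuestDRx(pVM, USE_REG_DR7, &uDr7);
 *     AssertRC(rc);
 *     uint32_t uDr5;
 *     rc = CPUMGetGuestDRx(pVM, USE_REG_DR5, &uDr5);  // yields the DR7 value
 * @endcode
 */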
CPUMDECL(uint64_t) CPUMGetGuestEFER(PVM pVM)
{
    return pVM->cpum.s.Guest.msrEFER;
}

/**
 * Gets a CpuId leaf.
 *
 * @param   pVM     The VM handle.
 * @param   iLeaf   The CPUID leaf to get.
 * @param   pEax    Where to store the EAX value.
 * @param   pEbx    Where to store the EBX value.
 * @param   pEcx    Where to store the ECX value.
 * @param   pEdx    Where to store the EDX value.
 */
CPUMDECL(void) CPUMGetGuestCpuId(PVM pVM, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
{
    PCCPUMCPUID pCpuId;
    if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
        pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
    else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
        pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
    else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
        pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
    else
        pCpuId = &pVM->cpum.s.GuestCpuIdDef;

    *pEax = pCpuId->eax;
    *pEbx = pCpuId->ebx;
    *pEcx = pCpuId->ecx;
    *pEdx = pCpuId->edx;
    Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
}

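/*
 * A minimal sketch (not part of the original file): querying standard
 * leaf 1 and testing a feature bit in the returned EDX value.
 *
 * @code
 *     uint32_t uEAX, uEBX, uECX, uEDX;
 *     CPUMGetGuestCpuId(pVM, 1, &uEAX, &uEBX, &uECX, &uEDX);
 *     bool fGuestPAE = !!(uEDX & X86_CPUID_FEATURE_EDX_PAE);
 * @endcode
 */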
/**
 * Gets a pointer to the array of standard CPUID leafs.
 *
 * CPUMGetGuestCpuIdStdMax() gives the size of the array.
 *
 * @returns Pointer to the standard CPUID leafs (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdStdGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
}

/**
 * Gets a pointer to the array of extended CPUID leafs.
 *
 * CPUMGetGuestCpuIdExtMax() gives the size of the array.
 *
 * @returns Pointer to the extended CPUID leafs (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdExtGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
}

/**
 * Gets a pointer to the array of centaur CPUID leafs.
 *
 * CPUMGetGuestCpuIdCentaurMax() gives the size of the array.
 *
 * @returns Pointer to the centaur CPUID leafs (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdCentaurGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
}

/**
 * Gets a pointer to the default CPUID leaf.
 *
 * @returns Pointer to the default CPUID leaf (read-only).
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdDefGCPtr(PVM pVM)
{
    return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
}

/**
 * Gets the number of standard CPUID leafs.
 *
 * @returns Number of leafs.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
}

/**
 * Gets the number of extended CPUID leafs.
 *
 * @returns Number of leafs.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
}

/**
 * Gets the number of centaur CPUID leafs.
 *
 * @returns Number of leafs.
 * @param   pVM     The VM handle.
 * @remark  Intended for PATM.
 */
CPUMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
}

/**
 * Sets a CPUID feature bit.
 *
 * @param   pVM         The VM Handle.
 * @param   enmFeature  The feature to set.
 */
CPUMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        /*
         * Set the APIC bit in both feature masks.
         */
        case CPUMCPUIDFEATURE_APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
            break;

        /*
         * Set the sysenter/sysexit bit in the standard feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_SEP:
        {
            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
            {
                AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
                return;
            }

            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
            break;
        }

        /*
         * Set the syscall/sysret bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_SYSCALL:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
            {
                LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
                return;
            }
            /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/sysret\n"));
            break;
        }

        /*
         * Set the PAE bit in both feature masks.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_PAE:
        {
            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
            {
                LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
                return;
            }

            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
            break;
        }

        /*
         * Set the LONG MODE bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_LONG_MODE:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
            {
                LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
            break;
        }

        /*
         * Set the NXE bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_NXE:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
            {
                LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
            break;
        }

        case CPUMCPUIDFEATURE_LAHF:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
            {
                LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
                return;
            }

            pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
}

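/*
 * A minimal sketch (not part of the original file) of how VM construction
 * code might expose features with the setter above; fGuestWantsPAE is a
 * hypothetical config flag. For features like SEP and PAE the setter
 * itself verifies host support before touching the guest leafs.
 *
 * @code
 *     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
 *     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
 *     if (fGuestWantsPAE)
 *         CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
 * @endcode
 */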
/**
 * Queries a CPUID feature bit.
 *
 * @returns boolean for feature presence
 * @param   pVM         The VM Handle.
 * @param   enmFeature  The feature to query.
 */
CPUMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        case CPUMCPUIDFEATURE_PAE:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    return false;
}

/**
 * Clears a CPUID feature bit.
 *
 * @param   pVM         The VM Handle.
 * @param   enmFeature  The feature to clear.
 */
CPUMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        /*
         * Clear the APIC bit in both feature masks.
         */
        case CPUMCPUIDFEATURE_APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
            Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n"));
            break;

        case CPUMCPUIDFEATURE_PAE:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
            LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
            break;
        }

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    pVM->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
}

/**
 * Gets the CPU vendor.
 *
 * @returns CPU vendor.
 * @param   pVM     The VM handle.
 */
CPUMDECL(CPUMCPUVENDOR) CPUMGetCPUVendor(PVM pVM)
{
    return pVM->cpum.s.enmCPUVendor;
}


CPUMDECL(int) CPUMSetGuestDR0(PVM pVM, RTGCUINTREG uDr0)
{
    pVM->cpum.s.Guest.dr0 = uDr0;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR1(PVM pVM, RTGCUINTREG uDr1)
{
    pVM->cpum.s.Guest.dr1 = uDr1;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR2(PVM pVM, RTGCUINTREG uDr2)
{
    pVM->cpum.s.Guest.dr2 = uDr2;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR3(PVM pVM, RTGCUINTREG uDr3)
{
    pVM->cpum.s.Guest.dr3 = uDr3;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR6(PVM pVM, RTGCUINTREG uDr6)
{
    pVM->cpum.s.Guest.dr6 = uDr6;
    return CPUMRecalcHyperDRx(pVM);
}

CPUMDECL(int) CPUMSetGuestDR7(PVM pVM, RTGCUINTREG uDr7)
{
    pVM->cpum.s.Guest.dr7 = uDr7;
    return CPUMRecalcHyperDRx(pVM);
}

/** @todo drx should be an array */
CPUMDECL(int) CPUMSetGuestDRx(PVM pVM, uint32_t iReg, uint32_t Value)
{
    switch (iReg)
    {
        case USE_REG_DR0:
            pVM->cpum.s.Guest.dr0 = Value;
            break;
        case USE_REG_DR1:
            pVM->cpum.s.Guest.dr1 = Value;
            break;
        case USE_REG_DR2:
            pVM->cpum.s.Guest.dr2 = Value;
            break;
        case USE_REG_DR3:
            pVM->cpum.s.Guest.dr3 = Value;
            break;
        case USE_REG_DR4:
        case USE_REG_DR6:
            pVM->cpum.s.Guest.dr6 = Value;
            break;
        case USE_REG_DR5:
        case USE_REG_DR7:
            pVM->cpum.s.Guest.dr7 = Value;
            break;

        default:
            return VERR_INVALID_PARAMETER;
    }
    return CPUMRecalcHyperDRx(pVM);
}


/**
 * Recalculates the hypervisor DRx register values based on
 * current guest registers and DBGF breakpoints.
 *
 * This is called whenever a guest DRx register is modified and when DBGF
 * sets a hardware breakpoint. In guest context this function will reload
 * any (hyper) DRx registers which come out with a different value.
 *
 * @returns VINF_SUCCESS.
 * @param   pVM     The VM handle.
 */
CPUMDECL(int) CPUMRecalcHyperDRx(PVM pVM)
{
    /*
     * Compare the DR7s first.
     *
     * We only care about the enabled flags. The GE and LE flags are always
     * set and we don't care if the guest doesn't set them. GD is virtualized
     * when we dispatch #DB, we never enable it.
     */
    const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
#ifdef CPUM_VIRTUALIZE_DRX
    const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVM);
#else
    const RTGCUINTREG uGstDr7 = 0;
#endif
    if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
    {
        /*
         * Ok, something is enabled. Recalc each of the breakpoints.
         * Straightforward code, not optimized/minimized in any way.
         */
        RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;

        /* bp 0 */
        RTGCUINTREG uNewDr0;
        if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
            uNewDr0 = DBGFBpGetDR0(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
            uNewDr0 = CPUMGetGuestDR0(pVM);
        }
        else
            uNewDr0 = pVM->cpum.s.Hyper.dr0;

        /* bp 1 */
        RTGCUINTREG uNewDr1;
        if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
            uNewDr1 = DBGFBpGetDR1(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
            uNewDr1 = CPUMGetGuestDR1(pVM);
        }
        else
            uNewDr1 = pVM->cpum.s.Hyper.dr1;

        /* bp 2 */
        RTGCUINTREG uNewDr2;
        if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
            uNewDr2 = DBGFBpGetDR2(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
            uNewDr2 = CPUMGetGuestDR2(pVM);
        }
        else
            uNewDr2 = pVM->cpum.s.Hyper.dr2;

        /* bp 3 */
        RTGCUINTREG uNewDr3;
        if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
            uNewDr3 = DBGFBpGetDR3(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
            uNewDr3 = CPUMGetGuestDR3(pVM);
        }
        else
            uNewDr3 = pVM->cpum.s.Hyper.dr3;

        /*
         * Apply the updates.
         */
#ifdef IN_GC
        if (!(pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
        {
            /** @todo save host DBx registers. */
        }
#endif
        pVM->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
        if (uNewDr3 != pVM->cpum.s.Hyper.dr3)
            CPUMSetHyperDR3(pVM, uNewDr3);
        if (uNewDr2 != pVM->cpum.s.Hyper.dr2)
            CPUMSetHyperDR2(pVM, uNewDr2);
        if (uNewDr1 != pVM->cpum.s.Hyper.dr1)
            CPUMSetHyperDR1(pVM, uNewDr1);
        if (uNewDr0 != pVM->cpum.s.Hyper.dr0)
            CPUMSetHyperDR0(pVM, uNewDr0);
        if (uNewDr7 != pVM->cpum.s.Hyper.dr7)
            CPUMSetHyperDR7(pVM, uNewDr7);
    }
    else
    {
#ifdef IN_GC
        if (pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
        {
            /** @todo restore host DBx registers. */
        }
#endif
        pVM->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
    }
    Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
          pVM->cpum.s.fUseFlags, pVM->cpum.s.Hyper.dr0, pVM->cpum.s.Hyper.dr1,
          pVM->cpum.s.Hyper.dr2, pVM->cpum.s.Hyper.dr3, pVM->cpum.s.Hyper.dr6,
          pVM->cpum.s.Hyper.dr7));

    return VINF_SUCCESS;
}

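/*
 * Illustration (not part of the original file) of the per-breakpoint merge
 * above: a DBGF-owned hardware breakpoint wins over the guest's own DRx,
 * and uNewDr7 accumulates only the enable/RW/LEN bits of the active owner.
 *
 * @code
 *     // after DBGF arms hardware breakpoint 0, a recalc picks it up:
 *     int rc = CPUMRecalcHyperDRx(pVM);
 *     AssertRC(rc);
 *     // hyper DR0 now holds DBGFBpGetDR0(pVM) and hyper DR7 carries the
 *     // matching L0/G0 + RW0/LEN0 bits merged over X86_DR7_GE | X86_DR7_LE.
 * @endcode
 */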
#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */

/**
 * Transforms the guest CPU state to raw-ring mode.
 *
 * This function will change the cs and ss registers with DPL=0 to DPL=1.
 *
 * @returns VBox status. (recompiler failure)
 * @param   pVM         VM handle.
 * @param   pCtxCore    The context core (for trap usage).
 * @see     @ref pg_raw
 */
CPUMDECL(int) CPUMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    Assert(!pVM->cpum.s.fRawEntered);
    if (!pCtxCore)
        pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Guest);

    /*
     * Are we in Ring-0?
     */
    if (    pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
        &&  !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Enter execution mode.
         */
        PATMRawEnter(pVM, pCtxCore);

        /*
         * Set CPL to Ring-1.
         */
        pCtxCore->ss |= 1;
        if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
            pCtxCore->cs |= 1;
    }
    else
    {
        AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
                  ("ring-1 code not supported\n"));
        /*
         * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
         */
        PATMRawEnter(pVM, pCtxCore);
    }

    /*
     * Assert sanity.
     */
    AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
    AssertReleaseMsg(   pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
                     || pCtxCore->eflags.Bits.u1VM,
                     ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
    Assert((pVM->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
    pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */

    pVM->cpum.s.fRawEntered = true;
    return VINF_SUCCESS;
}

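/*
 * Illustration (not part of the original file) of the ring shift above:
 * a flat ring-0 selector 0x08 (RPL=0) leaves CPUMRawEnter as 0x09 (RPL=1),
 * letting the raw-mode guest run in ring 1 while believing it is in ring 0.
 *
 * @code
 *     RTSEL Sel = 0x08;                  // GDT entry 1, RPL 0
 *     Sel |= 1;                          // what CPUMRawEnter does to cs/ss
 *     Assert((Sel & X86_SEL_RPL) == 1);  // 0x09: same descriptor, RPL 1
 * @endcode
 */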

/**
 * Transforms the guest CPU state from raw-ring mode to correct values.
 *
 * This function will change any selector registers with DPL=1 to DPL=0.
 *
 * @returns Adjusted rc.
 * @param   pVM         VM handle.
 * @param   rc          Raw mode return code.
 * @param   pCtxCore    The context core (for trap usage).
 * @see     @ref pg_raw
 */
CPUMDECL(int) CPUMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rc)
{
    /*
     * Don't leave if we've already left (in GC).
     */
    Assert(pVM->cpum.s.fRawEntered);
    if (!pVM->cpum.s.fRawEntered)
        return rc;
    pVM->cpum.s.fRawEntered = false;

    PCPUMCTX pCtx = &pVM->cpum.s.Guest;
    if (!pCtxCore)
        pCtxCore = CPUMCTX2CORE(pCtx);
    Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
    AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
              ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));

    /*
     * Are we executing in raw ring-1?
     */
    if (    (pCtxCore->ss & X86_SEL_RPL) == 1
        &&  !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Leave execution mode.
         */
        PATMRawLeave(pVM, pCtxCore, rc);
        /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
        /** @todo See what happens if we remove this. */
        if ((pCtxCore->ds & X86_SEL_RPL) == 1)
            pCtxCore->ds &= ~X86_SEL_RPL;
        if ((pCtxCore->es & X86_SEL_RPL) == 1)
            pCtxCore->es &= ~X86_SEL_RPL;
        if ((pCtxCore->fs & X86_SEL_RPL) == 1)
            pCtxCore->fs &= ~X86_SEL_RPL;
        if ((pCtxCore->gs & X86_SEL_RPL) == 1)
            pCtxCore->gs &= ~X86_SEL_RPL;

        /*
         * Ring-1 selector => Ring-0.
         */
        pCtxCore->ss &= ~X86_SEL_RPL;
        if ((pCtxCore->cs & X86_SEL_RPL) == 1)
            pCtxCore->cs &= ~X86_SEL_RPL;
    }
    else
    {
        /*
         * PATM is taking care of the IOPL and IF flags for us.
         */
        PATMRawLeave(pVM, pCtxCore, rc);
        if (!pCtxCore->eflags.Bits.u1VM)
        {
            /** @todo See what happens if we remove this. */
            if ((pCtxCore->ds & X86_SEL_RPL) == 1)
                pCtxCore->ds &= ~X86_SEL_RPL;
            if ((pCtxCore->es & X86_SEL_RPL) == 1)
                pCtxCore->es &= ~X86_SEL_RPL;
            if ((pCtxCore->fs & X86_SEL_RPL) == 1)
                pCtxCore->fs &= ~X86_SEL_RPL;
            if ((pCtxCore->gs & X86_SEL_RPL) == 1)
                pCtxCore->gs &= ~X86_SEL_RPL;
        }
    }

    return rc;
}

/**
 * Updates the EFLAGS while we're in raw-mode.
 *
 * @param   pVM         The VM handle.
 * @param   pCtxCore    The context core.
 * @param   eflags      The new EFLAGS value.
 */
CPUMDECL(void) CPUMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t eflags)
{
    if (!pVM->cpum.s.fRawEntered)
    {
        pCtxCore->eflags.u32 = eflags;
        return;
    }
    PATMRawSetEFlags(pVM, pCtxCore, eflags);
}

#endif /* !IN_RING0 */

/**
 * Gets the EFLAGS while we're in raw-mode.
 *
 * @returns The eflags.
 * @param   pVM         The VM handle.
 * @param   pCtxCore    The context core.
 */
CPUMDECL(uint32_t) CPUMRawGetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore)
{
#ifdef IN_RING0
    return pCtxCore->eflags.u32;
#else
    if (!pVM->cpum.s.fRawEntered)
        return pCtxCore->eflags.u32;
    return PATMRawGetEFlags(pVM, pCtxCore);
#endif
}




/**
 * Gets and resets the changed flags (CPUM_CHANGED_*).
 * Only REM should call this function.
 *
 * @returns The changed flags.
 * @param   pVM     The VM handle.
 */
CPUMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVM pVM)
{
    unsigned fFlags = pVM->cpum.s.fChanged;
    pVM->cpum.s.fChanged = 0;
    /** @todo change the switcher to use the fChanged flags. */
    if (pVM->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
    {
        fFlags |= CPUM_CHANGED_FPU_REM;
        pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
    }
    return fFlags;
}

/**
 * Sets the specified changed flags (CPUM_CHANGED_*).
 *
 * @param   pVM     The VM handle.
 */
CPUMDECL(void) CPUMSetChangedFlags(PVM pVM, uint32_t fChangedFlags)
{
    pVM->cpum.s.fChanged |= fChangedFlags;
}

/**
 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
 * @returns true if supported.
 * @returns false if not supported.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMSupportsFXSR(PVM pVM)
{
    return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
}


/**
 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
 * @returns true if used.
 * @returns false if not used.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
{
    return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSENTER) != 0;
}


/**
 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
 * @returns true if used.
 * @returns false if not used.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
{
    return (pVM->cpum.s.fUseFlags & CPUM_USE_SYSCALL) != 0;
}


#ifndef IN_RING3
/**
 * Lazily sync in the FPU/XMM state.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 */
CPUMDECL(int) CPUMHandleLazyFPU(PVM pVM)
{
    return CPUMHandleLazyFPUAsm(&pVM->cpum.s);
}


/**
 * Restore host FPU/XMM state.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 */
CPUMDECL(int) CPUMRestoreHostFPUState(PVM pVM)
{
    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
    return CPUMRestoreHostFPUStateAsm(&pVM->cpum.s);
}
#endif /* !IN_RING3 */


/**
 * Checks if we activated the FPU/XMM state of the guest OS.
 * @returns true if we did.
 * @returns false if not.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMIsGuestFPUStateActive(PVM pVM)
{
    return (pVM->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
}


/**
 * Deactivate the FPU/XMM state of the guest OS.
 * @param   pVM     The VM handle.
 */
CPUMDECL(void) CPUMDeactivateGuestFPUState(PVM pVM)
{
    pVM->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
}


/**
 * Checks if the hidden selector registers are valid.
 * @returns true if they are.
 * @returns false if not.
 * @param   pVM     The VM handle.
 */
CPUMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
{
    return !!pVM->cpum.s.fValidHiddenSelRegs; /** @todo change fValidHiddenSelRegs to bool! */
}


/**
 * Marks the hidden selector registers as valid or invalid.
 * @param   pVM     The VM handle.
 * @param   fValid  Valid or not.
 */
CPUMDECL(void) CPUMSetHiddenSelRegsValid(PVM pVM, bool fValid)
{
    pVM->cpum.s.fValidHiddenSelRegs = fValid;
}


/**
 * Get the current privilege level of the guest.
 *
 * @returns cpl
 * @param   pVM         VM Handle.
 * @param   pCtxCore    The context core (trap register frame).
 */
CPUMDECL(uint32_t) CPUMGetGuestCPL(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    uint32_t cpl;

    if (CPUMAreHiddenSelRegsValid(pVM))
        cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
    else if (RT_LIKELY(pVM->cpum.s.Guest.cr0 & X86_CR0_PE))
    {
        if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
        {
            cpl = (pCtxCore->ss & X86_SEL_RPL);
#ifndef IN_RING0
            if (cpl == 1)
                cpl = 0;
#endif
        }
        else
            cpl = 3;
    }
    else
        cpl = 0; /* real mode; cpl is zero */

    return cpl;
}

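/*
 * A minimal sketch (not part of the original file): because of the raw-mode
 * ring compression above, a caller outside ring-0 sees CPL 0 for guest code
 * that is physically running in ring 1.
 *
 * @code
 *     uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
 *     if (cpl == 0)
 *     {
 *         // treat as guest kernel mode (may really be raw ring-1)
 *     }
 * @endcode
 */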

/**
 * Gets the current guest CPU mode.
 *
 * If paging mode is what you need, check out PGMGetGuestMode().
 *
 * @returns The CPU mode.
 * @param   pVM     The VM handle.
 */
CPUMDECL(CPUMMODE) CPUMGetGuestMode(PVM pVM)
{
    CPUMMODE enmMode;
    if (!(pVM->cpum.s.Guest.cr0 & X86_CR0_PE))
        enmMode = CPUMMODE_REAL;
    else //GUEST64 if (!(pVM->cpum.s.Guest.efer & MSR_K6_EFER_LMA)
        enmMode = CPUMMODE_PROTECTED;
//GUEST64 else
//GUEST64     enmMode = CPUMMODE_LONG;

    return enmMode;
}