VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp @ 26993

Last change on this file since 26993 was 26993, checked in by vboxsync, 15 years ago

VMM: implement some Nehalem MSRs

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 58.7 KB
1/* $Id: CPUMAllRegs.cpp 26993 2010-03-03 14:23:59Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_CPUM
27#include <VBox/cpum.h>
28#include <VBox/patm.h>
29#include <VBox/dbgf.h>
30#include <VBox/mm.h>
31#include "CPUMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/err.h>
34#include <VBox/dis.h>
35#include <VBox/log.h>
36#include <VBox/hwaccm.h>
37#include <VBox/tm.h>
38#include <iprt/assert.h>
39#include <iprt/asm.h>
40#ifdef IN_RING3
41#include <iprt/thread.h>
42#endif
43
44/** Disable stack frame pointer generation here. */
45#if defined(_MSC_VER) && !defined(DEBUG)
46# pragma optimize("y", off)
47#endif
48
49
50/**
51 * Sets or resets an alternative hypervisor context core.
52 *
53 * This is called when we get a hypervisor trap and need to switch the
54 * context core to the trap frame on the stack. It is called again to reset
55 * back to the default context core when resuming hypervisor execution.
56 *
57 * @param pVCpu The VMCPU handle.
58 * @param pCtxCore Pointer to the alternative context core or NULL
59 * to go back to the default context core.
60 */
61VMMDECL(void) CPUMHyperSetCtxCore(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
62{
63 PVM pVM = pVCpu->CTX_SUFF(pVM);
64
65 LogFlow(("CPUMHyperSetCtxCore: %p/%p/%p -> %p\n", pVCpu->cpum.s.pHyperCoreR3, pVCpu->cpum.s.pHyperCoreR0, pVCpu->cpum.s.pHyperCoreRC, pCtxCore));
66 if (!pCtxCore)
67 {
68 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
69 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
70 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
71 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))VM_RC_ADDR(pVM, pCtxCore);
72 }
73 else
74 {
75 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
76 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
77 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
78 }
79}
80
81
82/**
83 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
84 * This is only for reading in order to save a few calls.
85 *
86 * @param pVCpu Handle to the virtual CPU.
87 */
88VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
89{
90 return pVCpu->cpum.s.CTX_SUFF(pHyperCore);
91}
92
93
94/**
95 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
96 *
97 * @returns VBox status code.
98 * @param pVCpu Handle to the virtual CPU.
99 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
100 *
101 * @deprecated This does *not* (and never has) give the right picture of the
102 * hypervisor register state. With CPUMHyperSetCtxCore() this is
103 * getting much worse. So, use the individual functions for getting
104 * and especially for setting the hypervisor registers.
105 */
106VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx)
107{
108 *ppCtx = &pVCpu->cpum.s.Hyper;
109 return VINF_SUCCESS;
110}
111
112
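/**
 * Sets the hypervisor GDTR.
 *
 * @param   pVCpu   The VMCPU handle.
 * @param   addr    The GDT base address.
 * @param   limit   The GDT limit.
 */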
113VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
114{
115 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
116 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
117 pVCpu->cpum.s.Hyper.gdtrPadding = 0;
118}
119
120
121VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
122{
123 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
124 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
125 pVCpu->cpum.s.Hyper.idtrPadding = 0;
126}
127
128
129VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
130{
131 pVCpu->cpum.s.Hyper.cr3 = cr3;
132
133#ifdef IN_RC
134 /* Update the current CR3. */
135 ASMSetCR3(cr3);
136#endif
137}
138
139VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
140{
141 return pVCpu->cpum.s.Hyper.cr3;
142}
143
144
145VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
146{
147 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs = SelCS;
148}
149
150
151VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
152{
153 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds = SelDS;
154}
155
156
157VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
158{
159 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es = SelES;
160}
161
162
163VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
164{
165 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs = SelFS;
166}
167
168
169VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
170{
171 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs = SelGS;
172}
173
174
175VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
176{
177 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss = SelSS;
178}
179
180
181VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
182{
183 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp = u32ESP;
184}
185
186
187VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
188{
189 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32 = Efl;
190 return VINF_SUCCESS;
191}
192
193
194VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
195{
196 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip = u32EIP;
197}
198
199
200VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
201{
202 pVCpu->cpum.s.Hyper.tr = SelTR;
203}
204
205
206VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
207{
208 pVCpu->cpum.s.Hyper.ldtr = SelLDTR;
209}
210
211
212VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
213{
214 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
215 /** @todo in GC we must load it! */
216}
217
218
219VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
220{
221 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
222 /** @todo in GC we must load it! */
223}
224
225
226VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
227{
228 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
229 /** @todo in GC we must load it! */
230}
231
232
233VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
234{
235 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
236 /** @todo in GC we must load it! */
237}
238
239
240VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
241{
242 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
243 /** @todo in GC we must load it! */
244}
245
246
247VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
248{
249 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
250 /** @todo in GC we must load it! */
251}
252
253
254VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
255{
256 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs;
257}
258
259
260VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
261{
262 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds;
263}
264
265
266VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
267{
268 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es;
269}
270
271
272VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
273{
274 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs;
275}
276
277
278VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
279{
280 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs;
281}
282
283
284VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
285{
286 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss;
287}
288
289
290VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
291{
292 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eax;
293}
294
295
296VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
297{
298 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebx;
299}
300
301
302VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
303{
304 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ecx;
305}
306
307
308VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
309{
310 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edx;
311}
312
313
314VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
315{
316 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esi;
317}
318
319
320VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
321{
322 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edi;
323}
324
325
326VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
327{
328 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebp;
329}
330
331
332VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
333{
334 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp;
335}
336
337
338VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
339{
340 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32;
341}
342
343
344VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
345{
346 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip;
347}
348
349
350VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
351{
352 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->rip;
353}
354
355
356VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
357{
358 if (pcbLimit)
359 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
360 return pVCpu->cpum.s.Hyper.idtr.pIdt;
361}
362
363
364VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
365{
366 if (pcbLimit)
367 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
368 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
369}
370
371
372VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
373{
374 return pVCpu->cpum.s.Hyper.ldtr;
375}
376
377
378VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
379{
380 return pVCpu->cpum.s.Hyper.dr[0];
381}
382
383
384VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
385{
386 return pVCpu->cpum.s.Hyper.dr[1];
387}
388
389
390VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
391{
392 return pVCpu->cpum.s.Hyper.dr[2];
393}
394
395
396VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
397{
398 return pVCpu->cpum.s.Hyper.dr[3];
399}
400
401
402VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
403{
404 return pVCpu->cpum.s.Hyper.dr[6];
405}
406
407
408VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
409{
410 return pVCpu->cpum.s.Hyper.dr[7];
411}
412
413
414/**
415 * Gets the pointer to the internal CPUMCTXCORE structure.
416 * This is only for reading in order to save a few calls.
417 *
418 * @param pVCpu Handle to the virtual cpu.
419 */
420VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
421{
422 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
423}
424
425
426/**
427 * Sets the guest context core registers.
428 *
429 * @param pVCpu Handle to the virtual cpu.
430 * @param pCtxCore The new context core values.
431 */
432VMMDECL(void) CPUMSetGuestCtxCore(PVMCPU pVCpu, PCCPUMCTXCORE pCtxCore)
433{
434 /** @todo #1410 requires selectors to be checked. (huh? 1410?) */
435
436 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
437 *pCtxCoreDst = *pCtxCore;
438
439 /* Mask away invalid parts of the cpu context. */
440 if (!CPUMIsGuestInLongMode(pVCpu))
441 {
442 uint64_t u64Mask = UINT64_C(0xffffffff);
443
444 pCtxCoreDst->rip &= u64Mask;
445 pCtxCoreDst->rax &= u64Mask;
446 pCtxCoreDst->rbx &= u64Mask;
447 pCtxCoreDst->rcx &= u64Mask;
448 pCtxCoreDst->rdx &= u64Mask;
449 pCtxCoreDst->rsi &= u64Mask;
450 pCtxCoreDst->rdi &= u64Mask;
451 pCtxCoreDst->rbp &= u64Mask;
452 pCtxCoreDst->rsp &= u64Mask;
453 pCtxCoreDst->rflags.u &= u64Mask;
454
455 pCtxCoreDst->r8 = 0;
456 pCtxCoreDst->r9 = 0;
457 pCtxCoreDst->r10 = 0;
458 pCtxCoreDst->r11 = 0;
459 pCtxCoreDst->r12 = 0;
460 pCtxCoreDst->r13 = 0;
461 pCtxCoreDst->r14 = 0;
462 pCtxCoreDst->r15 = 0;
463 }
464}
465
466
467/**
468 * Queries the pointer to the internal CPUMCTX structure
469 *
470 * @returns The CPUMCTX pointer.
471 * @param pVCpu Handle to the virtual cpu.
472 */
473VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
474{
475 return &pVCpu->cpum.s.Guest;
476}
477
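/**
 * Sets the guest GDTR and marks it as changed (CPUM_CHANGED_GDTR).
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu   The VMCPU handle.
 * @param   addr    The new GDT base address.
 * @param   limit   The new GDT limit.
 */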
478VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
479{
480 pVCpu->cpum.s.Guest.gdtr.cbGdt = limit;
481 pVCpu->cpum.s.Guest.gdtr.pGdt = addr;
482 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
483 return VINF_SUCCESS;
484}
485
486VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
487{
488 pVCpu->cpum.s.Guest.idtr.cbIdt = limit;
489 pVCpu->cpum.s.Guest.idtr.pIdt = addr;
490 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
491 return VINF_SUCCESS;
492}
493
494VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
495{
496 AssertMsgFailed(("Need to load the hidden bits too!\n"));
497
498 pVCpu->cpum.s.Guest.tr = tr;
499 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
500 return VINF_SUCCESS;
501}
502
503VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
504{
505 pVCpu->cpum.s.Guest.ldtr = ldtr;
506 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
507 return VINF_SUCCESS;
508}
509
510
511/**
512 * Set the guest CR0.
513 *
514 * When called in GC, the hyper CR0 may be updated if that is
515 * required. The caller only has to take special action if AM,
516 * WP, PG or PE changes.
517 *
518 * @returns VINF_SUCCESS (consider it void).
519 * @param pVCpu Handle to the virtual cpu.
520 * @param cr0 The new CR0 value.
521 */
522VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
523{
524#ifdef IN_RC
525 /*
526 * Check if we need to change hypervisor CR0 because
527 * of math stuff.
528 */
529 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
530 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
531 {
532 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
533 {
534 /*
535 * We haven't saved the host FPU state yet, so TS and MT are both set
536 * and EM should be reflecting the guest EM (it always does this).
537 */
538 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
539 {
540 uint32_t HyperCR0 = ASMGetCR0();
541 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
542 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
543 HyperCR0 &= ~X86_CR0_EM;
544 HyperCR0 |= cr0 & X86_CR0_EM;
545 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
546 ASMSetCR0(HyperCR0);
547 }
548# ifdef VBOX_STRICT
549 else
550 {
551 uint32_t HyperCR0 = ASMGetCR0();
552 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
553 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
554 }
555# endif
556 }
557 else
558 {
559 /*
560 * Already saved the state, so we're just mirroring
561 * the guest flags.
562 */
563 uint32_t HyperCR0 = ASMGetCR0();
564 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
565 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
566 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
567 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
568 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
569 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
570 ASMSetCR0(HyperCR0);
571 }
572 }
573#endif /* IN_RC */
574
575 /*
576 * Check for changes causing TLB flushes (for REM).
577 * The caller is responsible for calling PGM when appropriate.
578 */
579 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
580 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
581 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
582 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
583
584 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
585 return VINF_SUCCESS;
586}
587
588
589VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
590{
591 pVCpu->cpum.s.Guest.cr2 = cr2;
592 return VINF_SUCCESS;
593}
594
595
596VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
597{
598 pVCpu->cpum.s.Guest.cr3 = cr3;
599 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
600 return VINF_SUCCESS;
601}
602
603
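/**
 * Sets the guest CR4.
 *
 * Flags a global TLB flush when the PGE, PAE or PSE bits change, and clears
 * the FXSAVE/FXRSTOR enable bit when the host does not support it.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu   The VMCPU handle.
 * @param   cr4     The new CR4 value.
 */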
604VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
605{
606 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
607 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
608 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
609 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
610 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
611 cr4 &= ~X86_CR4_OSFSXR;
612 pVCpu->cpum.s.Guest.cr4 = cr4;
613 return VINF_SUCCESS;
614}
615
616
617VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
618{
619 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
620 return VINF_SUCCESS;
621}
622
623
624VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
625{
626 pVCpu->cpum.s.Guest.eip = eip;
627 return VINF_SUCCESS;
628}
629
630
631VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
632{
633 pVCpu->cpum.s.Guest.eax = eax;
634 return VINF_SUCCESS;
635}
636
637
638VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
639{
640 pVCpu->cpum.s.Guest.ebx = ebx;
641 return VINF_SUCCESS;
642}
643
644
645VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
646{
647 pVCpu->cpum.s.Guest.ecx = ecx;
648 return VINF_SUCCESS;
649}
650
651
652VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
653{
654 pVCpu->cpum.s.Guest.edx = edx;
655 return VINF_SUCCESS;
656}
657
658
659VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
660{
661 pVCpu->cpum.s.Guest.esp = esp;
662 return VINF_SUCCESS;
663}
664
665
666VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
667{
668 pVCpu->cpum.s.Guest.ebp = ebp;
669 return VINF_SUCCESS;
670}
671
672
673VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
674{
675 pVCpu->cpum.s.Guest.esi = esi;
676 return VINF_SUCCESS;
677}
678
679
680VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
681{
682 pVCpu->cpum.s.Guest.edi = edi;
683 return VINF_SUCCESS;
684}
685
686
687VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
688{
689 pVCpu->cpum.s.Guest.ss = ss;
690 return VINF_SUCCESS;
691}
692
693
694VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
695{
696 pVCpu->cpum.s.Guest.cs = cs;
697 return VINF_SUCCESS;
698}
699
700
701VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
702{
703 pVCpu->cpum.s.Guest.ds = ds;
704 return VINF_SUCCESS;
705}
706
707
708VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
709{
710 pVCpu->cpum.s.Guest.es = es;
711 return VINF_SUCCESS;
712}
713
714
715VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
716{
717 pVCpu->cpum.s.Guest.fs = fs;
718 return VINF_SUCCESS;
719}
720
721
722VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
723{
724 pVCpu->cpum.s.Guest.gs = gs;
725 return VINF_SUCCESS;
726}
727
728
729VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
730{
731 pVCpu->cpum.s.Guest.msrEFER = val;
732}
733
734
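/**
 * Gets the value of a guest MSR.
 *
 * Only a limited set of MSRs is handled here; anything else hits the
 * assertion in the default case and returns 0.
 *
 * @returns The MSR value (0 for unhandled MSRs).
 * @param   pVCpu   The VMCPU handle.
 * @param   idMsr   The MSR id.
 */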
735VMMDECL(uint64_t) CPUMGetGuestMsr(PVMCPU pVCpu, unsigned idMsr)
736{
737 uint64_t u64 = 0;
738 uint8_t u8Multiplier = 4;
739
740 switch (idMsr)
741 {
742 case MSR_IA32_TSC:
743 u64 = TMCpuTickGet(pVCpu);
744 break;
745
746 case MSR_IA32_CR_PAT:
747 u64 = pVCpu->cpum.s.Guest.msrPAT;
748 break;
749
750 case MSR_IA32_SYSENTER_CS:
751 u64 = pVCpu->cpum.s.Guest.SysEnter.cs;
752 break;
753
754 case MSR_IA32_SYSENTER_EIP:
755 u64 = pVCpu->cpum.s.Guest.SysEnter.eip;
756 break;
757
758 case MSR_IA32_SYSENTER_ESP:
759 u64 = pVCpu->cpum.s.Guest.SysEnter.esp;
760 break;
761
762 case MSR_K6_EFER:
763 u64 = pVCpu->cpum.s.Guest.msrEFER;
764 break;
765
766 case MSR_K8_SF_MASK:
767 u64 = pVCpu->cpum.s.Guest.msrSFMASK;
768 break;
769
770 case MSR_K6_STAR:
771 u64 = pVCpu->cpum.s.Guest.msrSTAR;
772 break;
773
774 case MSR_K8_LSTAR:
775 u64 = pVCpu->cpum.s.Guest.msrLSTAR;
776 break;
777
778 case MSR_K8_CSTAR:
779 u64 = pVCpu->cpum.s.Guest.msrCSTAR;
780 break;
781
782 case MSR_K8_KERNEL_GS_BASE:
783 u64 = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
784 break;
785
786 case MSR_K8_TSC_AUX:
787 u64 = pVCpu->cpum.s.GuestMsr.msr.tscAux;
788 break;
789
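    /*
     * The values returned by the two cases below (the Nehalem additions) are
     * synthetic: a fixed CPU multiplier of 4 and a fixed TSC increment,
     * kept consistent with the REM rdmsr helper rather than read from the
     * host.
     */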
790 case MSR_IA32_PERF_STATUS:
791 /** @todo This may not be exactly correct; maybe use the host's values. */
792 /* Keep consistent with helper_rdmsr() in REM */
793 u64 = (1000ULL /* TSC increment by tick */)
794 | ((uint64_t)u8Multiplier << 40 /* CPU multiplier */ );
795 break;
796
797 case MSR_IA32_PLATFORM_INFO:
798 u64 = ((u8Multiplier)<<8 /* Flex ratio max */)
799 | ((uint64_t)u8Multiplier << 40 /* Flex ratio min */ );
800 break;
801
802 /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
803 default:
804 AssertFailed();
805 break;
806 }
807 return u64;
808}
809
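/**
 * Sets the value of a guest MSR.
 *
 * Deliberately handles only a few MSRs; the MSR emulation code updates the
 * rest.
 *
 * @param   pVCpu    The VMCPU handle.
 * @param   idMsr    The MSR id.
 * @param   valMsr   The new MSR value.
 */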
810VMMDECL(void) CPUMSetGuestMsr(PVMCPU pVCpu, unsigned idMsr, uint64_t valMsr)
811{
812 /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
813 switch (idMsr)
814 {
815 case MSR_K8_TSC_AUX:
816 pVCpu->cpum.s.GuestMsr.msr.tscAux = valMsr;
817 break;
818
819 default:
820 AssertFailed();
821 break;
822 }
823}
824
825VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
826{
827 if (pcbLimit)
828 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
829 return pVCpu->cpum.s.Guest.idtr.pIdt;
830}
831
832
833VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
834{
835 if (pHidden)
836 *pHidden = pVCpu->cpum.s.Guest.trHid;
837 return pVCpu->cpum.s.Guest.tr;
838}
839
840
841VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
842{
843 return pVCpu->cpum.s.Guest.cs;
844}
845
846
847VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
848{
849 return pVCpu->cpum.s.Guest.ds;
850}
851
852
853VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
854{
855 return pVCpu->cpum.s.Guest.es;
856}
857
858
859VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
860{
861 return pVCpu->cpum.s.Guest.fs;
862}
863
864
865VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
866{
867 return pVCpu->cpum.s.Guest.gs;
868}
869
870
871VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
872{
873 return pVCpu->cpum.s.Guest.ss;
874}
875
876
877VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
878{
879 return pVCpu->cpum.s.Guest.ldtr;
880}
881
882
883VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
884{
885 return pVCpu->cpum.s.Guest.cr0;
886}
887
888
889VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
890{
891 return pVCpu->cpum.s.Guest.cr2;
892}
893
894
895VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
896{
897 return pVCpu->cpum.s.Guest.cr3;
898}
899
900
901VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
902{
903 return pVCpu->cpum.s.Guest.cr4;
904}
905
906
907VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
908{
909 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
910}
911
912
913VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
914{
915 return pVCpu->cpum.s.Guest.eip;
916}
917
918
919VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
920{
921 return pVCpu->cpum.s.Guest.rip;
922}
923
924
925VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
926{
927 return pVCpu->cpum.s.Guest.eax;
928}
929
930
931VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
932{
933 return pVCpu->cpum.s.Guest.ebx;
934}
935
936
937VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
938{
939 return pVCpu->cpum.s.Guest.ecx;
940}
941
942
943VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
944{
945 return pVCpu->cpum.s.Guest.edx;
946}
947
948
949VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
950{
951 return pVCpu->cpum.s.Guest.esi;
952}
953
954
955VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
956{
957 return pVCpu->cpum.s.Guest.edi;
958}
959
960
961VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
962{
963 return pVCpu->cpum.s.Guest.esp;
964}
965
966
967VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
968{
969 return pVCpu->cpum.s.Guest.ebp;
970}
971
972
973VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
974{
975 return pVCpu->cpum.s.Guest.eflags.u32;
976}
977
978
979///@todo: crx should be an array
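/**
 * Gets a guest control register by index.
 *
 * @returns VBox status code (VERR_INVALID_PARAMETER for unknown registers).
 * @param   pVCpu   The VMCPU handle.
 * @param   iReg    The control register index (USE_REG_CR*).
 * @param   pValue  Where to store the register value.
 */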
980VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
981{
982 switch (iReg)
983 {
984 case USE_REG_CR0:
985 *pValue = pVCpu->cpum.s.Guest.cr0;
986 break;
987 case USE_REG_CR2:
988 *pValue = pVCpu->cpum.s.Guest.cr2;
989 break;
990 case USE_REG_CR3:
991 *pValue = pVCpu->cpum.s.Guest.cr3;
992 break;
993 case USE_REG_CR4:
994 *pValue = pVCpu->cpum.s.Guest.cr4;
995 break;
996 default:
997 return VERR_INVALID_PARAMETER;
998 }
999 return VINF_SUCCESS;
1000}
1001
1002
1003VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1004{
1005 return pVCpu->cpum.s.Guest.dr[0];
1006}
1007
1008
1009VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1010{
1011 return pVCpu->cpum.s.Guest.dr[1];
1012}
1013
1014
1015VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1016{
1017 return pVCpu->cpum.s.Guest.dr[2];
1018}
1019
1020
1021VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1022{
1023 return pVCpu->cpum.s.Guest.dr[3];
1024}
1025
1026
1027VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1028{
1029 return pVCpu->cpum.s.Guest.dr[6];
1030}
1031
1032
1033VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1034{
1035 return pVCpu->cpum.s.Guest.dr[7];
1036}
1037
1038
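/**
 * Gets a guest debug register by index, treating DR4/DR5 as aliases for
 * DR6/DR7.
 *
 * @returns VBox status code.
 * @param   pVCpu   The VMCPU handle.
 * @param   iReg    The debug register index (USE_REG_DR*).
 * @param   pValue  Where to store the register value.
 */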
1039VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1040{
1041 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1042 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1043 if (iReg == 4 || iReg == 5)
1044 iReg += 2;
1045 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1046 return VINF_SUCCESS;
1047}
1048
1049
1050VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1051{
1052 return pVCpu->cpum.s.Guest.msrEFER;
1053}
1054
1055
1056/**
1057 * Gets a CpuId leaf.
1058 *
1059 * @param pVCpu The VMCPU handle.
1060 * @param iLeaf The CPUID leaf to get.
1061 * @param pEax Where to store the EAX value.
1062 * @param pEbx Where to store the EBX value.
1063 * @param pEcx Where to store the ECX value.
1064 * @param pEdx Where to store the EDX value.
1065 */
1066VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1067{
1068 PVM pVM = pVCpu->CTX_SUFF(pVM);
1069
1070 PCCPUMCPUID pCpuId;
1071 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1072 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1073 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1074 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1075 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1076 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1077 else
1078 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1079
1080 uint32_t cCurrentCacheIndex = *pEcx;
1081
1082 *pEax = pCpuId->eax;
1083 *pEbx = pCpuId->ebx;
1084 *pEcx = pCpuId->ecx;
1085 *pEdx = pCpuId->edx;
1086
1087 if ( iLeaf == 1)
1088 {
1089 /* Bits 31-24: Initial APIC ID */
1090 Assert(pVCpu->idCpu <= 255);
1091 *pEbx |= (pVCpu->idCpu << 24);
1092 }
1093
1094 if ( iLeaf == 4
1095 && cCurrentCacheIndex < 3
1096 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1097 {
1098 uint32_t type, level, sharing, linesize,
1099 partitions, associativity, sets, cores;
1100
1101 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1102 partitions = 1;
1103 /* These only silence the compiler; they are always
1104 overwritten below, but the compiler cannot see that. */
1105 sets = associativity = sharing = level = 1;
1106 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1107 switch (cCurrentCacheIndex)
1108 {
1109 case 0:
1110 type = 1;
1111 level = 1;
1112 sharing = 1;
1113 linesize = 64;
1114 associativity = 8;
1115 sets = 64;
1116 break;
1117 case 1:
1118 level = 1;
1119 type = 2;
1120 sharing = 1;
1121 linesize = 64;
1122 associativity = 8;
1123 sets = 64;
1124 break;
1125 default: /* shut up gcc.*/
1126 AssertFailed();
1127 case 2:
1128 level = 2;
1129 type = 3;
1130 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1131 linesize = 64;
1132 associativity = 24;
1133 sets = 4096;
1134 break;
1135 }
1136
1137 *pEax |= ((cores - 1) << 26) |
1138 ((sharing - 1) << 14) |
1139 (level << 5) |
1140 1;
1141 *pEbx = (linesize - 1) |
1142 ((partitions - 1) << 12) |
1143 ((associativity - 1) << 22); /* -1 encoding */
1144 *pEcx = sets - 1;
1145 }
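    /*
     * The fields use the standard CPUID leaf 4 "minus one" encoding, so the
     * cache size decodes as ways * partitions * line size * sets; e.g. the
     * synthetic L2 above is 24 * 1 * 64 * 4096 = 6 MB, unified and shared
     * between all cores.
     */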
1146
1147 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1148}
1149
1150/**
1151 * Gets the number of standard CPUID leaves.
1152 *
1153 * @returns Number of leaves.
1154 * @param pVM The VM handle.
1155 * @remark Intended for PATM.
1156 */
1157VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1158{
1159 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1160}
1161
1162
1163/**
1164 * Gets the number of extended CPUID leaves.
1165 *
1166 * @returns Number of leaves.
1167 * @param pVM The VM handle.
1168 * @remark Intended for PATM.
1169 */
1170VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1171{
1172 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1173}
1174
1175
1176/**
1177 * Gets the number of Centaur CPUID leaves.
1178 *
1179 * @returns Number of leaves.
1180 * @param pVM The VM handle.
1181 * @remark Intended for PATM.
1182 */
1183VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1184{
1185 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1186}
1187
1188
1189/**
1190 * Sets a CPUID feature bit.
1191 *
1192 * @param pVM The VM Handle.
1193 * @param enmFeature The feature to set.
1194 */
1195VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1196{
1197 switch (enmFeature)
1198 {
1199 /*
1200 * Set the APIC bit in both feature masks.
1201 */
1202 case CPUMCPUIDFEATURE_APIC:
1203 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1204 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1205 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1206 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1207 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1208 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1209 break;
1210
1211 /*
1212 * Set the x2APIC bit in the standard feature mask.
1213 */
1214 case CPUMCPUIDFEATURE_X2APIC:
1215 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1216 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1217 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1218 break;
1219
1220 /*
1221 * Set the sysenter/sysexit bit in the standard feature mask.
1222 * Assumes the caller knows what it's doing! (host must support these)
1223 */
1224 case CPUMCPUIDFEATURE_SEP:
1225 {
1226 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1227 {
1228 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1229 return;
1230 }
1231
1232 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1233 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1234 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1235 break;
1236 }
1237
1238 /*
1239 * Set the syscall/sysret bit in the extended feature mask.
1240 * Assumes the caller knows what it's doing! (host must support these)
1241 */
1242 case CPUMCPUIDFEATURE_SYSCALL:
1243 {
1244 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1245 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1246 {
1247#if HC_ARCH_BITS == 32
1248 /* X86_CPUID_AMD_FEATURE_EDX_SEP does not seem to be set in 32-bit mode,
1249 * even when the CPU is capable of it in 64-bit mode.
1250 */
1251 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1252 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1253 || !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1254#endif
1255 {
1256 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1257 return;
1258 }
1259 }
1260 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1261 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1262 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1263 break;
1264 }
1265
1266 /*
1267 * Set the PAE bit in both feature masks.
1268 * Assumes the caller knows what it's doing! (host must support these)
1269 */
1270 case CPUMCPUIDFEATURE_PAE:
1271 {
1272 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1273 {
1274 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1275 return;
1276 }
1277
1278 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1279 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1280 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1281 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1282 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1283 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1284 break;
1285 }
1286
1287 /*
1288 * Set the LONG MODE bit in the extended feature mask.
1289 * Assumes the caller knows what it's doing! (host must support these)
1290 */
1291 case CPUMCPUIDFEATURE_LONG_MODE:
1292 {
1293 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1294 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1295 {
1296 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1297 return;
1298 }
1299
1300 /* Valid for both Intel and AMD. */
1301 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1302 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1303 break;
1304 }
1305
1306 /*
1307 * Set the NXE bit in the extended feature mask.
1308 * Assumes the caller knows what it's doing! (host must support these)
1309 */
1310 case CPUMCPUIDFEATURE_NXE:
1311 {
1312 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1313 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1314 {
1315 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1316 return;
1317 }
1318
1319 /* Valid for both Intel and AMD. */
1320 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1321 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1322 break;
1323 }
1324
1325 case CPUMCPUIDFEATURE_LAHF:
1326 {
1327 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1328 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1329 {
1330 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1331 return;
1332 }
1333
1334 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1335 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1336 break;
1337 }
1338
1339 case CPUMCPUIDFEATURE_PAT:
1340 {
1341 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1342 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1343 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1344 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1345 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1346 LogRel(("CPUMClearGuestCpuIdFeature: Enabled PAT\n"));
1347 break;
1348 }
1349
1350 case CPUMCPUIDFEATURE_RDTSCP:
1351 {
1352 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1353 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP))
1354 {
1355 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1356 return;
1357 }
1358
1359 /* Valid for AMD only (for now). */
1360 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
1361 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1362 break;
1363 }
1364
1365 default:
1366 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1367 break;
1368 }
1369 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1370 {
1371 PVMCPU pVCpu = &pVM->aCpus[i];
1372 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1373 }
1374}
1375
1376
1377/**
1378 * Queries a CPUID feature bit.
1379 *
1380 * @returns boolean for feature presence
1381 * @param pVM The VM Handle.
1382 * @param enmFeature The feature to query.
1383 */
1384VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1385{
1386 switch (enmFeature)
1387 {
1388 case CPUMCPUIDFEATURE_PAE:
1389 {
1390 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1391 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1392 break;
1393 }
1394
1395 case CPUMCPUIDFEATURE_RDTSCP:
1396 {
1397 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1398 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
1399 break;
1400 }
1401
1402 case CPUMCPUIDFEATURE_LONG_MODE:
1403 {
1404 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1405 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
1406 break;
1407 }
1408
1409 default:
1410 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1411 break;
1412 }
1413 return false;
1414}
1415
1416
1417/**
1418 * Clears a CPUID feature bit.
1419 *
1420 * @param pVM The VM Handle.
1421 * @param enmFeature The feature to clear.
1422 */
1423VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1424{
1425 switch (enmFeature)
1426 {
1427 /*
1428 * Set the APIC bit in both feature masks.
1429 */
1430 case CPUMCPUIDFEATURE_APIC:
1431 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1432 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1433 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1434 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1435 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1436 Log(("CPUMSetGuestCpuIdFeature: Disabled APIC\n"));
1437 break;
1438
1439 /*
1440 * Clear the x2APIC bit in the standard feature mask.
1441 */
1442 case CPUMCPUIDFEATURE_X2APIC:
1443 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1444 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1445 LogRel(("CPUMSetGuestCpuIdFeature: Disabled x2APIC\n"));
1446 break;
1447
1448 case CPUMCPUIDFEATURE_PAE:
1449 {
1450 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1451 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1452 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1453 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1454 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1455 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1456 break;
1457 }
1458
1459 case CPUMCPUIDFEATURE_PAT:
1460 {
1461 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1462 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1463 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1464 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1465 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1466 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1467 break;
1468 }
1469
1470 case CPUMCPUIDFEATURE_LONG_MODE:
1471 {
1472 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1473 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1474 break;
1475 }
1476
1477 case CPUMCPUIDFEATURE_LAHF:
1478 {
1479 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1480 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1481 break;
1482 }
1483
1484 default:
1485 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1486 break;
1487 }
1488 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1489 {
1490 PVMCPU pVCpu = &pVM->aCpus[i];
1491 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1492 }
1493}
1494
1495
1496/**
1497 * Gets the host CPU vendor
1498 *
1499 * @returns CPU vendor
1500 * @param pVM The VM handle.
1501 */
1502VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1503{
1504 return pVM->cpum.s.enmHostCpuVendor;
1505}
1506
1507/**
1508 * Gets the guest CPU vendor
1509 *
1510 * @returns CPU vendor
1511 * @param pVM The VM handle.
1512 */
1513VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1514{
1515 return pVM->cpum.s.enmGuestCpuVendor;
1516}
1517
1518
1519VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1520{
1521 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1522 return CPUMRecalcHyperDRx(pVCpu);
1523}
1524
1525
1526VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1527{
1528 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1529 return CPUMRecalcHyperDRx(pVCpu);
1530}
1531
1532
1533VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1534{
1535 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1536 return CPUMRecalcHyperDRx(pVCpu);
1537}
1538
1539
1540VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1541{
1542 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1543 return CPUMRecalcHyperDRx(pVCpu);
1544}
1545
1546
1547VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1548{
1549 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1550 return CPUMRecalcHyperDRx(pVCpu);
1551}
1552
1553
1554VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1555{
1556 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1557 return CPUMRecalcHyperDRx(pVCpu);
1558}
1559
1560
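/**
 * Sets a guest debug register by index, treating DR4/DR5 as aliases for
 * DR6/DR7, and recalculates the hypervisor DRx values.
 *
 * @returns VBox status code from CPUMRecalcHyperDRx.
 * @param   pVCpu   The VMCPU handle.
 * @param   iReg    The debug register index (USE_REG_DR*).
 * @param   Value   The new register value.
 */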
1561VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1562{
1563 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1564 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1565 if (iReg == 4 || iReg == 5)
1566 iReg += 2;
1567 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1568 return CPUMRecalcHyperDRx(pVCpu);
1569}
1570
1571
1572/**
1573 * Recalculates the hypervisor DRx register values based on
1574 * the current guest registers and DBGF breakpoints.
1575 *
1576 * This is called whenever a guest DRx register is modified and when DBGF
1577 * sets a hardware breakpoint. In guest context this function will reload
1578 * any (hyper) DRx registers which come out with a different value.
1579 *
1580 * @returns VINF_SUCCESS.
1581 * @param pVCpu The VMCPU handle.
1582 */
1583VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
1584{
1585 PVM pVM = pVCpu->CTX_SUFF(pVM);
1586
1587 /*
1588 * Compare the DR7s first.
1589 *
1590 * We only care about the enabled flags. The GE and LE flags are always
1591 * set and we don't care if the guest doesn't set them. GD is virtualized
1592 * when we dispatch #DB; we never enable it.
1593 */
1594 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1595#ifdef CPUM_VIRTUALIZE_DRX
1596 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1597#else
1598 const RTGCUINTREG uGstDr7 = 0;
1599#endif
1600 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1601 {
1602 /*
1603 * OK, something is enabled. Recalculate each of the breakpoints.
1604 * Straightforward code, not optimized/minimized in any way.
1605 */
1606 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1607
1608 /* bp 0 */
1609 RTGCUINTREG uNewDr0;
1610 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1611 {
1612 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1613 uNewDr0 = DBGFBpGetDR0(pVM);
1614 }
1615 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1616 {
1617 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1618 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1619 }
1620 else
1621 uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
1622
1623 /* bp 1 */
1624 RTGCUINTREG uNewDr1;
1625 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1626 {
1627 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1628 uNewDr1 = DBGFBpGetDR1(pVM);
1629 }
1630 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1631 {
1632 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1633 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1634 }
1635 else
1636 uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
1637
1638 /* bp 2 */
1639 RTGCUINTREG uNewDr2;
1640 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1641 {
1642 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1643 uNewDr2 = DBGFBpGetDR2(pVM);
1644 }
1645 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1646 {
1647 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1648 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1649 }
1650 else
1651 uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
1652
1653 /* bp 3 */
1654 RTGCUINTREG uNewDr3;
1655 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1656 {
1657 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1658 uNewDr3 = DBGFBpGetDR3(pVM);
1659 }
1660 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1661 {
1662 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1663 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1664 }
1665 else
1666 uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
1667
1668 /*
1669 * Apply the updates.
1670 */
1671#ifdef IN_RC
1672 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
1673 {
1674 /** @todo save host DBx registers. */
1675 }
1676#endif
1677 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
1678 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1679 CPUMSetHyperDR3(pVCpu, uNewDr3);
1680 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1681 CPUMSetHyperDR2(pVCpu, uNewDr2);
1682 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1683 CPUMSetHyperDR1(pVCpu, uNewDr1);
1684 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1685 CPUMSetHyperDR0(pVCpu, uNewDr0);
1686 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1687 CPUMSetHyperDR7(pVCpu, uNewDr7);
1688 }
1689 else
1690 {
1691#ifdef IN_RC
1692 if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
1693 {
1694 /** @todo restore host DBx registers. */
1695 }
1696#endif
1697 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1698 }
1699 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1700 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1701 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1702 pVCpu->cpum.s.Hyper.dr[7]));
1703
1704 return VINF_SUCCESS;
1705}
1706
1707
1708/**
1709 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1710 *
1711 * @returns true if NX is enabled, otherwise false.
1712 * @param pVCpu The virtual CPU handle.
1713 */
1714VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
1715{
1716 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1717}
1718
1719
1720/**
1721 * Tests if the guest has the Page Size Extension enabled (PSE).
1722 *
1723 * @returns true if PSE is enabled, otherwise false.
1724 * @param pVCpu The virtual CPU handle.
1725 */
1726VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
1727{
1728 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1729 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1730}
1731
1732
1733/**
1734 * Tests if the guest has the paging enabled (PG).
1735 *
1736 * @returns true if paging is enabled, otherwise false.
1737 * @param pVCpu The virtual CPU handle.
1738 */
1739VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
1740{
1741 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1742}
1743
1744
1745/**
1746 * Tests if the guest has ring-0 write protection enabled (WP).
1747 *
1748 * @returns true if WP is enabled, otherwise false.
1749 * @param pVCpu The virtual CPU handle.
1750 */
1751VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
1752{
1753 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1754}
1755
1756
1757/**
1758 * Tests if the guest is running in real mode or not.
1759 *
1760 * @returns true if in real mode, otherwise false.
1761 * @param pVCpu The virtual CPU handle.
1762 */
1763VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
1764{
1765 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1766}
1767
1768
1769/**
1770 * Tests if the guest is running in protected mode or not.
1771 *
1772 * @returns true if in protected mode, otherwise false.
1773 * @param pVCpu The virtual CPU handle.
1774 */
1775VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
1776{
1777 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1778}
1779
1780
1781/**
1782 * Tests if the guest is running in paged protected mode or not.
1783 *
1784 * @returns true if in paged protected mode, otherwise false.
1785 * @param pVCpu The virtual CPU handle.
1786 */
1787VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
1788{
1789 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1790}
1791
1792
1793/**
1794 * Tests if the guest is running in long mode or not.
1795 *
1796 * @returns true if in long mode, otherwise false.
1797 * @param pVCpu The virtual CPU handle.
1798 */
1799VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
1800{
1801 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1802}
1803
1804
1805/**
1806 * Tests if the guest is running in PAE mode or not.
1807 *
1808 * @returns true if in PAE mode, otherwise false.
1809 * @param pVCpu The virtual CPU handle.
1810 */
1811VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
1812{
1813 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1814 && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
1815 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1816}
1817
1818
1819
1820#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
1821
1822/**
1823 * Transforms the guest CPU state to raw-ring mode.
1824 *
1825 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
1826 *
1827 * @returns VBox status. (recompiler failure)
1828 * @param pVCpu The VMCPU handle.
1829 * @param pCtxCore The context core (for trap usage).
1830 * @see @ref pg_raw
1831 */
1832VMMDECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
1833{
1834 PVM pVM = pVCpu->CTX_SUFF(pVM);
1835
1836 Assert(!pVM->cpum.s.fRawEntered);
1837 if (!pCtxCore)
1838 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
1839
1840 /*
1841 * Are we in Ring-0?
1842 */
1843 if ( pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
1844 && !pCtxCore->eflags.Bits.u1VM)
1845 {
1846 /*
1847 * Enter execution mode.
1848 */
1849 PATMRawEnter(pVM, pCtxCore);
1850
1851 /*
1852 * Set CPL to Ring-1.
1853 */
1854 pCtxCore->ss |= 1;
1855 if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
1856 pCtxCore->cs |= 1;
1857 }
1858 else
1859 {
1860 AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
1861 ("ring-1 code not supported\n"));
1862 /*
1863 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
1864 */
1865 PATMRawEnter(pVM, pCtxCore);
1866 }
1867
1868 /*
1869 * Assert sanity.
1870 */
1871 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
1872 AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
1873 || pCtxCore->eflags.Bits.u1VM,
1874 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1875 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
1876 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
1877
1878 pVM->cpum.s.fRawEntered = true;
1879 return VINF_SUCCESS;
1880}
1881
1882
1883/**
1884 * Transforms the guest CPU state from raw-ring mode to correct values.
1885 *
1886 * This function will change any selector registers with DPL=1 to DPL=0.
1887 *
1888 * @returns Adjusted rc.
1889 * @param pVCpu The VMCPU handle.
1890 * @param rc Raw mode return code
1891 * @param pCtxCore The context core (for trap usage).
1892 * @see @ref pg_raw
1893 */
1894VMMDECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
1895{
1896 PVM pVM = pVCpu->CTX_SUFF(pVM);
1897
1898 /*
1899 * Don't leave if we've already left (in GC).
1900 */
1901 Assert(pVM->cpum.s.fRawEntered);
1902 if (!pVM->cpum.s.fRawEntered)
1903 return rc;
1904 pVM->cpum.s.fRawEntered = false;
1905
1906 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1907 if (!pCtxCore)
1908 pCtxCore = CPUMCTX2CORE(pCtx);
1909 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
1910 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
1911 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1912
1913 /*
1914 * Are we executing in raw ring-1?
1915 */
1916 if ( (pCtxCore->ss & X86_SEL_RPL) == 1
1917 && !pCtxCore->eflags.Bits.u1VM)
1918 {
1919 /*
1920 * Leave execution mode.
1921 */
1922 PATMRawLeave(pVM, pCtxCore, rc);
1923 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
1924 /** @todo See what happens if we remove this. */
1925 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1926 pCtxCore->ds &= ~X86_SEL_RPL;
1927 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1928 pCtxCore->es &= ~X86_SEL_RPL;
1929 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1930 pCtxCore->fs &= ~X86_SEL_RPL;
1931 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1932 pCtxCore->gs &= ~X86_SEL_RPL;
1933
1934 /*
1935 * Ring-1 selector => Ring-0.
1936 */
1937 pCtxCore->ss &= ~X86_SEL_RPL;
1938 if ((pCtxCore->cs & X86_SEL_RPL) == 1)
1939 pCtxCore->cs &= ~X86_SEL_RPL;
1940 }
1941 else
1942 {
1943 /*
1944 * PATM is taking care of the IOPL and IF flags for us.
1945 */
1946 PATMRawLeave(pVM, pCtxCore, rc);
1947 if (!pCtxCore->eflags.Bits.u1VM)
1948 {
1949 /** @todo See what happens if we remove this. */
1950 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1951 pCtxCore->ds &= ~X86_SEL_RPL;
1952 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1953 pCtxCore->es &= ~X86_SEL_RPL;
1954 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1955 pCtxCore->fs &= ~X86_SEL_RPL;
1956 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1957 pCtxCore->gs &= ~X86_SEL_RPL;
1958 }
1959 }
1960
1961 return rc;
1962}
1963
1964/**
1965 * Updates the EFLAGS while we're in raw-mode.
1966 *
1967 * @param pVCpu The VMCPU handle.
1968 * @param pCtxCore The context core.
1969 * @param eflags The new EFLAGS value.
1970 */
1971VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t eflags)
1972{
1973 PVM pVM = pVCpu->CTX_SUFF(pVM);
1974
1975 if (!pVM->cpum.s.fRawEntered)
1976 {
1977 pCtxCore->eflags.u32 = eflags;
1978 return;
1979 }
1980 PATMRawSetEFlags(pVM, pCtxCore, eflags);
1981}
1982
1983#endif /* !IN_RING0 */
1984
1985/**
1986 * Gets the EFLAGS while we're in raw-mode.
1987 *
1988 * @returns The eflags.
1989 * @param pVCpu The VMCPU handle.
1990 * @param pCtxCore The context core.
1991 */
1992VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
1993{
1994#ifdef IN_RING0
1995 return pCtxCore->eflags.u32;
1996#else
1997 PVM pVM = pVCpu->CTX_SUFF(pVM);
1998
1999 if (!pVM->cpum.s.fRawEntered)
2000 return pCtxCore->eflags.u32;
2001 return PATMRawGetEFlags(pVM, pCtxCore);
2002#endif
2003}
2004
2005
2006/**
2007 * Gets and resets the changed flags (CPUM_CHANGED_*).
2008 * Only REM should call this function.
2009 *
2010 * @returns The changed flags.
2011 * @param pVCpu The VMCPU handle.
2012 */
2013VMMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVMCPU pVCpu)
2014{
2015 unsigned fFlags = pVCpu->cpum.s.fChanged;
2016 pVCpu->cpum.s.fChanged = 0;
2017 /** @todo change the switcher to use the fChanged flags. */
2018 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
2019 {
2020 fFlags |= CPUM_CHANGED_FPU_REM;
2021 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
2022 }
2023 return fFlags;
2024}
2025
2026
2027/**
2028 * Sets the specified changed flags (CPUM_CHANGED_*).
2029 *
2030 * @param pVCpu The VMCPU handle.
2031 */
2032VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2033{
2034 pVCpu->cpum.s.fChanged |= fChangedFlags;
2035}
2036
2037
2038/**
2039 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
2040 * @returns true if supported.
2041 * @returns false if not supported.
2042 * @param pVM The VM handle.
2043 */
2044VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2045{
2046 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2047}
2048
2049
2050/**
2051 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2052 * @returns true if used.
2053 * @returns false if not used.
2054 * @param pVM The VM handle.
2055 */
2056VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2057{
2058 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
2059}
2060
2061
2062/**
2063 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2064 * @returns true if used.
2065 * @returns false if not used.
2066 * @param pVM The VM handle.
2067 */
2068VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2069{
2070 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
2071}
2072
2073#ifndef IN_RING3
2074
2075/**
2076 * Lazily sync in the FPU/XMM state
2077 *
2078 * @returns VBox status code.
2079 * @param pVCpu VMCPU handle
2080 */
2081VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2082{
2083 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2084}
2085
2086#endif /* !IN_RING3 */
2087
2088/**
2089 * Checks if we activated the FPU/XMM state of the guest OS
2090 * @returns true if we did.
2091 * @returns false if not.
2092 * @param pVCpu The VMCPU handle.
2093 */
2094VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2095{
2096 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2097}
2098
2099
2100/**
2101 * Deactivate the FPU/XMM state of the guest OS
2102 * @param pVCpu The VMCPU handle.
2103 */
2104VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2105{
2106 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2107}
2108
2109
2110/**
2111 * Checks if the guest debug state is active
2112 *
2113 * @returns boolean
2114 * @param pVCpu The VMCPU handle.
2115 */
2116VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2117{
2118 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2119}
2120
2121/**
2122 * Checks if the hyper debug state is active
2123 *
2124 * @returns boolean
2125 * @param pVCpu The VMCPU handle.
2126 */
2127VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2128{
2129 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
2130}
2131
2132
2133/**
2134 * Mark the guest's debug state as inactive
2135 *
2136 * @param pVCpu The VMCPU handle.
2138 */
2139VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2140{
2141 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2142}
2143
2144
2145/**
2146 * Mark the hypervisor's debug state as inactive
2147 *
2148 * @param pVCpu The VMCPU handle.
2150 */
2151VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
2152{
2153 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2154}
2155
2156/**
2157 * Checks if the hidden selector registers are valid
2158 * @returns true if they are.
2159 * @returns false if not.
2160 * @param pVM The VM handle.
2161 */
2162VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
2163{
2164 return HWACCMIsEnabled(pVM);
2165}
2166
2167
2168
2169/**
2170 * Get the current privilege level of the guest.
2171 *
2172 * @returns cpl
2173 * @param pVCpu The VMCPU handle.
2174 * @param pCtxCore The context core (for trap usage).
2175 */
2176VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2177{
2178 uint32_t cpl;
2179
2180 if (CPUMAreHiddenSelRegsValid(pVCpu->CTX_SUFF(pVM)))
2181 {
2182 /*
2183 * The hidden CS.DPL register is always equal to the CPL, it is
2184 * not affected by loading a conforming coding segment.
2185 *
2186 * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
2187 * at SS. (ACP2 regression during install after a far call to ring 2)
2188 */
2189 if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2190 cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
2191 else
2192 cpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2193 }
2194 else if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2195 {
2196 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
2197 {
2198 /*
2199 * The SS RPL is always equal to the CPL, while the CS RPL
2200 * isn't necessarily equal if the segment is conforming.
2201 * See section 4.11.1 in the AMD manual.
2202 */
2203 cpl = (pCtxCore->ss & X86_SEL_RPL);
2204#ifndef IN_RING0
2205 if (cpl == 1)
2206 cpl = 0;
2207#endif
2208 }
2209 else
2210 cpl = 3;
2211 }
2212 else
2213 cpl = 0; /* real mode; cpl is zero */
2214
2215 return cpl;
2216}
2217
2218
2219/**
2220 * Gets the current guest CPU mode.
2221 *
2222 * If paging mode is what you need, check out PGMGetGuestMode().
2223 *
2224 * @returns The CPU mode.
2225 * @param pVCpu The VMCPU handle.
2226 */
2227VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2228{
2229 CPUMMODE enmMode;
2230 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2231 enmMode = CPUMMODE_REAL;
2232 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2233 enmMode = CPUMMODE_PROTECTED;
2234 else
2235 enmMode = CPUMMODE_LONG;
2236
2237 return enmMode;
2238}