VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 28112

Last change on this file since 28112 was 28030, checked in by vboxsync, 15 years ago

VMM: SpeedStep and relatives MSRs

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 59.7 KB
1/* $Id: CPUMAllRegs.cpp 28030 2010-04-07 07:53:12Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_CPUM
27#include <VBox/cpum.h>
28#include <VBox/patm.h>
29#include <VBox/dbgf.h>
30#include <VBox/mm.h>
31#include "CPUMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/err.h>
34#include <VBox/dis.h>
35#include <VBox/log.h>
36#include <VBox/hwaccm.h>
37#include <VBox/tm.h>
38#include <iprt/assert.h>
39#include <iprt/asm.h>
40#ifdef IN_RING3
41#include <iprt/thread.h>
42#endif
43
44/** Make sure stack frame pointers are generated here (turn off MSC's /Oy frame-pointer omission). */
45#if defined(_MSC_VER) && !defined(DEBUG)
46# pragma optimize("y", off)
47#endif
48
49
50/**
51 * Sets or resets an alternative hypervisor context core.
52 *
53 * This is called when we get a hypervisor trap, to switch the context
54 * core with the trap frame on the stack. It is called again to reset
55 * back to the default context core when resuming hypervisor execution.
56 *
57 * @param pVCpu The VMCPU handle.
58 * @param pCtxCore Pointer to the alternative context core or NULL
59 * to go back to the default context core.
60 */
61VMMDECL(void) CPUMHyperSetCtxCore(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
62{
63 PVM pVM = pVCpu->CTX_SUFF(pVM);
64
65 LogFlow(("CPUMHyperSetCtxCore: %p -> %p\n", pVCpu->cpum.s.CTX_SUFF(pHyperCore), pCtxCore));
66 if (!pCtxCore)
67 {
68 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
69 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
70 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
71 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))VM_RC_ADDR(pVM, pCtxCore);
72 }
73 else
74 {
75 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
76 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
77 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
78 }
79}
80
81
82/**
83 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
84 * This is only for reading in order to save a few calls.
85 *
86 * @param pVCpu The VMCPU handle.
87 */
88VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
89{
90 return pVCpu->cpum.s.CTX_SUFF(pHyperCore);
91}
92
93
94/**
95 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
96 *
97 * @returns VBox status code.
98 * @param pVCpu The VMCPU handle.
99 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
100 *
101 * @deprecated This does *not* (and never has) give the right picture of the
102 * hypervisor register state. With CPUMHyperSetCtxCore() this is
103 * getting much worse. So, use the individual functions for getting
104 * and especially setting the hypervisor registers.
105 */
106VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx)
107{
108 *ppCtx = &pVCpu->cpum.s.Hyper;
109 return VINF_SUCCESS;
110}
111
112
113VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
114{
115 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
116 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
117 pVCpu->cpum.s.Hyper.gdtrPadding = 0;
118}
119
120
121VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
122{
123 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
124 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
125 pVCpu->cpum.s.Hyper.idtrPadding = 0;
126}
127
128
129VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
130{
131 pVCpu->cpum.s.Hyper.cr3 = cr3;
132
133#ifdef IN_RC
134 /* Update the current CR3. */
135 ASMSetCR3(cr3);
136#endif
137}
138
139VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
140{
141 return pVCpu->cpum.s.Hyper.cr3;
142}
143
144
145VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
146{
147 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs = SelCS;
148}
149
150
151VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
152{
153 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds = SelDS;
154}
155
156
157VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
158{
159 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es = SelES;
160}
161
162
163VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
164{
165 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs = SelFS;
166}
167
168
169VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
170{
171 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs = SelGS;
172}
173
174
175VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
176{
177 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss = SelSS;
178}
179
180
181VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
182{
183 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp = u32ESP;
184}
185
186
187VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
188{
189 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32 = Efl;
190 return VINF_SUCCESS;
191}
192
193
194VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
195{
196 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip = u32EIP;
197}
198
199
200VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
201{
202 pVCpu->cpum.s.Hyper.tr = SelTR;
203}
204
205
206VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
207{
208 pVCpu->cpum.s.Hyper.ldtr = SelLDTR;
209}
210
211
212VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
213{
214 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
215 /** @todo in GC we must load it! */
216}
217
218
219VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
220{
221 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
222 /** @todo in GC we must load it! */
223}
224
225
226VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
227{
228 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
229 /** @todo in GC we must load it! */
230}
231
232
233VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
234{
235 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
236 /** @todo in GC we must load it! */
237}
238
239
240VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
241{
242 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
243 /** @todo in GC we must load it! */
244}
245
246
247VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
248{
249 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
250 /** @todo in GC we must load it! */
251}
252
253
254VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
255{
256 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs;
257}
258
259
260VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
261{
262 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds;
263}
264
265
266VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
267{
268 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es;
269}
270
271
272VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
273{
274 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs;
275}
276
277
278VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
279{
280 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs;
281}
282
283
284VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
285{
286 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss;
287}
288
289
290VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
291{
292 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eax;
293}
294
295
296VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
297{
298 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebx;
299}
300
301
302VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
303{
304 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ecx;
305}
306
307
308VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
309{
310 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edx;
311}
312
313
314VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
315{
316 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esi;
317}
318
319
320VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
321{
322 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edi;
323}
324
325
326VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
327{
328 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebp;
329}
330
331
332VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
333{
334 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp;
335}
336
337
338VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
339{
340 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32;
341}
342
343
344VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
345{
346 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip;
347}
348
349
350VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
351{
352 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->rip;
353}
354
355
356VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
357{
358 if (pcbLimit)
359 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
360 return pVCpu->cpum.s.Hyper.idtr.pIdt;
361}
362
363
364VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
365{
366 if (pcbLimit)
367 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
368 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
369}
370
371
372VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
373{
374 return pVCpu->cpum.s.Hyper.ldtr;
375}
376
377
378VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
379{
380 return pVCpu->cpum.s.Hyper.dr[0];
381}
382
383
384VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
385{
386 return pVCpu->cpum.s.Hyper.dr[1];
387}
388
389
390VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
391{
392 return pVCpu->cpum.s.Hyper.dr[2];
393}
394
395
396VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
397{
398 return pVCpu->cpum.s.Hyper.dr[3];
399}
400
401
402VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
403{
404 return pVCpu->cpum.s.Hyper.dr[6];
405}
406
407
408VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
409{
410 return pVCpu->cpum.s.Hyper.dr[7];
411}
412
413
414/**
415 * Gets the pointer to the internal CPUMCTXCORE structure.
416 * This is only for reading in order to save a few calls.
417 *
418 * @param pVCpu Handle to the virtual cpu.
419 */
420VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
421{
422 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
423}
424
425
426/**
427 * Sets the guest context core registers.
428 *
429 * @param pVCpu Handle to the virtual cpu.
430 * @param pCtxCore The new context core values.
431 */
432VMMDECL(void) CPUMSetGuestCtxCore(PVMCPU pVCpu, PCCPUMCTXCORE pCtxCore)
433{
434 /** @todo #1410 requires selectors to be checked. (huh? 1410?) */
435
436 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
437 *pCtxCoreDst = *pCtxCore;
438
439 /* Mask away invalid parts of the cpu context. */
440 if (!CPUMIsGuestInLongMode(pVCpu))
441 {
442 uint64_t u64Mask = UINT64_C(0xffffffff);
443
444 pCtxCoreDst->rip &= u64Mask;
445 pCtxCoreDst->rax &= u64Mask;
446 pCtxCoreDst->rbx &= u64Mask;
447 pCtxCoreDst->rcx &= u64Mask;
448 pCtxCoreDst->rdx &= u64Mask;
449 pCtxCoreDst->rsi &= u64Mask;
450 pCtxCoreDst->rdi &= u64Mask;
451 pCtxCoreDst->rbp &= u64Mask;
452 pCtxCoreDst->rsp &= u64Mask;
453 pCtxCoreDst->rflags.u &= u64Mask;
454
455 pCtxCoreDst->r8 = 0;
456 pCtxCoreDst->r9 = 0;
457 pCtxCoreDst->r10 = 0;
458 pCtxCoreDst->r11 = 0;
459 pCtxCoreDst->r12 = 0;
460 pCtxCoreDst->r13 = 0;
461 pCtxCoreDst->r14 = 0;
462 pCtxCoreDst->r15 = 0;
463 }
464}
465
466
467/**
468 * Queries the pointer to the internal CPUMCTX structure
469 *
470 * @returns The CPUMCTX pointer.
471 * @param pVCpu Handle to the virtual cpu.
472 */
473VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
474{
475 return &pVCpu->cpum.s.Guest;
476}
477
478VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
479{
480 pVCpu->cpum.s.Guest.gdtr.cbGdt = limit;
481 pVCpu->cpum.s.Guest.gdtr.pGdt = addr;
482 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
483 return VINF_SUCCESS;
484}
485
486VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
487{
488 pVCpu->cpum.s.Guest.idtr.cbIdt = limit;
489 pVCpu->cpum.s.Guest.idtr.pIdt = addr;
490 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
491 return VINF_SUCCESS;
492}
493
494VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
495{
496 AssertMsgFailed(("Need to load the hidden bits too!\n"));
497
498 pVCpu->cpum.s.Guest.tr = tr;
499 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
500 return VINF_SUCCESS;
501}
502
503VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
504{
505 pVCpu->cpum.s.Guest.ldtr = ldtr;
506 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
507 return VINF_SUCCESS;
508}
509
510
511/**
512 * Set the guest CR0.
513 *
514 * When called in GC, the hyper CR0 may be updated if that is
515 * required. The caller only has to take special action if AM,
516 * WP, PG or PE changes.
517 *
518 * @returns VINF_SUCCESS (consider it void).
519 * @param pVCpu Handle to the virtual cpu.
520 * @param cr0 The new CR0 value.
521 */
522VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
523{
524#ifdef IN_RC
525 /*
526 * Check if we need to change hypervisor CR0 because
527 * of math stuff.
528 */
529 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
530 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
531 {
532 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
533 {
534 /*
535 * We haven't saved the host FPU state yet, so TS and MT are both set
536 * and EM should be reflecting the guest EM (it always does this).
537 */
538 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
539 {
540 uint32_t HyperCR0 = ASMGetCR0();
541 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
542 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
543 HyperCR0 &= ~X86_CR0_EM;
544 HyperCR0 |= cr0 & X86_CR0_EM;
545 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
546 ASMSetCR0(HyperCR0);
547 }
548# ifdef VBOX_STRICT
549 else
550 {
551 uint32_t HyperCR0 = ASMGetCR0();
552 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
553 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
554 }
555# endif
556 }
557 else
558 {
559 /*
560 * Already saved the state, so we're just mirroring
561 * the guest flags.
562 */
563 uint32_t HyperCR0 = ASMGetCR0();
564 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
565 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
566 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
567 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
568 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
569 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
570 ASMSetCR0(HyperCR0);
571 }
572 }
573#endif /* IN_RC */
574
575 /*
576 * Check for changes causing TLB flushes (for REM).
577 * The caller is responsible for calling PGM when appropriate.
578 */
579 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
580 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
581 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
582 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
583
584 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
585 return VINF_SUCCESS;
586}
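/*
 * Illustrative sketch, not part of the original file: in the raw-mode (IN_RC)
 * path above only the lazy-FPU bits (TS, EM, MP) are mirrored from the guest
 * CR0 into the CR0 the CPU actually runs with.  Assuming a hypothetical
 * uGuestCr0 value, the "state already saved" branch boils down to:
 */
#if 0 /* example only */
    uint32_t const fFpuBits  = X86_CR0_TS | X86_CR0_EM | X86_CR0_MP;
    uint32_t       uHyperCr0 = ASMGetCR0();
    uHyperCr0 &= ~fFpuBits;             /* drop the previous TS/EM/MP bits */
    uHyperCr0 |= uGuestCr0 & fFpuBits;  /* copy the guest's TS/EM/MP bits  */
    ASMSetCR0(uHyperCr0);
#endif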
587
588
589VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
590{
591 pVCpu->cpum.s.Guest.cr2 = cr2;
592 return VINF_SUCCESS;
593}
594
595
596VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
597{
598 pVCpu->cpum.s.Guest.cr3 = cr3;
599 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
600 return VINF_SUCCESS;
601}
602
603
604VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
605{
606 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
607 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
608 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
609 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
610 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
611 cr4 &= ~X86_CR4_OSFSXR;
612 pVCpu->cpum.s.Guest.cr4 = cr4;
613 return VINF_SUCCESS;
614}
615
616
617VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
618{
619 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
620 return VINF_SUCCESS;
621}
622
623
624VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
625{
626 pVCpu->cpum.s.Guest.eip = eip;
627 return VINF_SUCCESS;
628}
629
630
631VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
632{
633 pVCpu->cpum.s.Guest.eax = eax;
634 return VINF_SUCCESS;
635}
636
637
638VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
639{
640 pVCpu->cpum.s.Guest.ebx = ebx;
641 return VINF_SUCCESS;
642}
643
644
645VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
646{
647 pVCpu->cpum.s.Guest.ecx = ecx;
648 return VINF_SUCCESS;
649}
650
651
652VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
653{
654 pVCpu->cpum.s.Guest.edx = edx;
655 return VINF_SUCCESS;
656}
657
658
659VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
660{
661 pVCpu->cpum.s.Guest.esp = esp;
662 return VINF_SUCCESS;
663}
664
665
666VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
667{
668 pVCpu->cpum.s.Guest.ebp = ebp;
669 return VINF_SUCCESS;
670}
671
672
673VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
674{
675 pVCpu->cpum.s.Guest.esi = esi;
676 return VINF_SUCCESS;
677}
678
679
680VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
681{
682 pVCpu->cpum.s.Guest.edi = edi;
683 return VINF_SUCCESS;
684}
685
686
687VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
688{
689 pVCpu->cpum.s.Guest.ss = ss;
690 return VINF_SUCCESS;
691}
692
693
694VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
695{
696 pVCpu->cpum.s.Guest.cs = cs;
697 return VINF_SUCCESS;
698}
699
700
701VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
702{
703 pVCpu->cpum.s.Guest.ds = ds;
704 return VINF_SUCCESS;
705}
706
707
708VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
709{
710 pVCpu->cpum.s.Guest.es = es;
711 return VINF_SUCCESS;
712}
713
714
715VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
716{
717 pVCpu->cpum.s.Guest.fs = fs;
718 return VINF_SUCCESS;
719}
720
721
722VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
723{
724 pVCpu->cpum.s.Guest.gs = gs;
725 return VINF_SUCCESS;
726}
727
728
729VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
730{
731 pVCpu->cpum.s.Guest.msrEFER = val;
732}
733
734
735VMMDECL(uint64_t) CPUMGetGuestMsr(PVMCPU pVCpu, unsigned idMsr)
736{
737 uint64_t u64 = 0;
738 uint8_t u8Multiplier = 4;
739
740 switch (idMsr)
741 {
742 case MSR_IA32_TSC:
743 u64 = TMCpuTickGet(pVCpu);
744 break;
745
746 case MSR_IA32_CR_PAT:
747 u64 = pVCpu->cpum.s.Guest.msrPAT;
748 break;
749
750 case MSR_IA32_SYSENTER_CS:
751 u64 = pVCpu->cpum.s.Guest.SysEnter.cs;
752 break;
753
754 case MSR_IA32_SYSENTER_EIP:
755 u64 = pVCpu->cpum.s.Guest.SysEnter.eip;
756 break;
757
758 case MSR_IA32_SYSENTER_ESP:
759 u64 = pVCpu->cpum.s.Guest.SysEnter.esp;
760 break;
761
762 case MSR_K6_EFER:
763 u64 = pVCpu->cpum.s.Guest.msrEFER;
764 break;
765
766 case MSR_K8_SF_MASK:
767 u64 = pVCpu->cpum.s.Guest.msrSFMASK;
768 break;
769
770 case MSR_K6_STAR:
771 u64 = pVCpu->cpum.s.Guest.msrSTAR;
772 break;
773
774 case MSR_K8_LSTAR:
775 u64 = pVCpu->cpum.s.Guest.msrLSTAR;
776 break;
777
778 case MSR_K8_CSTAR:
779 u64 = pVCpu->cpum.s.Guest.msrCSTAR;
780 break;
781
782 case MSR_K8_KERNEL_GS_BASE:
783 u64 = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
784 break;
785
786 case MSR_K8_TSC_AUX:
787 u64 = pVCpu->cpum.s.GuestMsr.msr.tscAux;
788 break;
789
790 case MSR_IA32_PERF_STATUS:
791 /** @todo This may not be exactly correct; maybe use the host's values instead. */
792 /* Keep consistent with helper_rdmsr() in REM */
793 u64 = (1000ULL /* TSC increment by tick */)
794 | ((uint64_t)u8Multiplier << 24 /* CPU multiplier (aka bus ratio) min */ )
795 | ((uint64_t)u8Multiplier << 40 /* CPU multiplier (aka bus ratio) max */ );
796 break;
797
798 case MSR_IA32_FSB_CLOCK_STS:
799 /*
800 * FSB frequency, encoded as:
801 * 0 - 266 MHz
802 * 1 - 133 MHz
803 * 2 - 200 MHz
804 * 3 - 166 MHz
805 * 5 - 100 MHz
806 */
807 u64 = (2 << 4);
808 break;
809
810 case MSR_IA32_PLATFORM_INFO:
811 u64 = ((u8Multiplier)<<8 /* Flex ratio max */)
812 | ((uint64_t)u8Multiplier << 40 /* Flex ratio min */ );
813 break;
814
815 case MSR_IA32_THERM_STATUS:
816 /* CPU temperature relative to TCC; to actually activate this, CPUID leaf 6 EAX[0] must be set. */
817 u64 = (1 << 31) /* validity bit */ |
818 (20 << 16) /* degrees till TCC */;
819 break;
820
821 case MSR_IA32_MISC_ENABLE:
822#if 0
823 /* Needs to be tested more before enabling. */
824 u64 = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
825#else
826 u64 = 0;
827#endif
828 break;
829
830 /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
831 default:
832 AssertFailed();
833 break;
834 }
835 return u64;
836}
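/*
 * Illustrative sketch, not part of the original file: the synthetic
 * MSR_IA32_PERF_STATUS value composed above stores the TSC increment in the
 * low bits and the CPU multiplier (bus ratio) at bit offsets 24 and 40.
 * A guest reading it back (u64PerfStatus being a hypothetical value obtained
 * from CPUMGetGuestMsr) would decode it roughly like this:
 */
#if 0 /* example only */
    uint8_t const uRatioMin = (uint8_t)(u64PerfStatus >> 24);  /* == u8Multiplier */
    uint8_t const uRatioMax = (uint8_t)(u64PerfStatus >> 40);  /* == u8Multiplier */
#endif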
837
838VMMDECL(void) CPUMSetGuestMsr(PVMCPU pVCpu, unsigned idMsr, uint64_t valMsr)
839{
840 /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
841 switch (idMsr)
842 {
843 case MSR_K8_TSC_AUX:
844 pVCpu->cpum.s.GuestMsr.msr.tscAux = valMsr;
845 break;
846
847 case MSR_IA32_MISC_ENABLE:
848 pVCpu->cpum.s.GuestMsr.msr.miscEnable = valMsr;
849 break;
850
851 default:
852 AssertFailed();
853 break;
854 }
855}
856
857VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
858{
859 if (pcbLimit)
860 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
861 return pVCpu->cpum.s.Guest.idtr.pIdt;
862}
863
864
865VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
866{
867 if (pHidden)
868 *pHidden = pVCpu->cpum.s.Guest.trHid;
869 return pVCpu->cpum.s.Guest.tr;
870}
871
872
873VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
874{
875 return pVCpu->cpum.s.Guest.cs;
876}
877
878
879VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
880{
881 return pVCpu->cpum.s.Guest.ds;
882}
883
884
885VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
886{
887 return pVCpu->cpum.s.Guest.es;
888}
889
890
891VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
892{
893 return pVCpu->cpum.s.Guest.fs;
894}
895
896
897VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
898{
899 return pVCpu->cpum.s.Guest.gs;
900}
901
902
903VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
904{
905 return pVCpu->cpum.s.Guest.ss;
906}
907
908
909VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
910{
911 return pVCpu->cpum.s.Guest.ldtr;
912}
913
914
915VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
916{
917 return pVCpu->cpum.s.Guest.cr0;
918}
919
920
921VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
922{
923 return pVCpu->cpum.s.Guest.cr2;
924}
925
926
927VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
928{
929 return pVCpu->cpum.s.Guest.cr3;
930}
931
932
933VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
934{
935 return pVCpu->cpum.s.Guest.cr4;
936}
937
938
939VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
940{
941 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
942}
943
944
945VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
946{
947 return pVCpu->cpum.s.Guest.eip;
948}
949
950
951VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
952{
953 return pVCpu->cpum.s.Guest.rip;
954}
955
956
957VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
958{
959 return pVCpu->cpum.s.Guest.eax;
960}
961
962
963VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
964{
965 return pVCpu->cpum.s.Guest.ebx;
966}
967
968
969VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
970{
971 return pVCpu->cpum.s.Guest.ecx;
972}
973
974
975VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
976{
977 return pVCpu->cpum.s.Guest.edx;
978}
979
980
981VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
982{
983 return pVCpu->cpum.s.Guest.esi;
984}
985
986
987VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
988{
989 return pVCpu->cpum.s.Guest.edi;
990}
991
992
993VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
994{
995 return pVCpu->cpum.s.Guest.esp;
996}
997
998
999VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1000{
1001 return pVCpu->cpum.s.Guest.ebp;
1002}
1003
1004
1005VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1006{
1007 return pVCpu->cpum.s.Guest.eflags.u32;
1008}
1009
1010
1011///@todo: crx should be an array
1012VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1013{
1014 switch (iReg)
1015 {
1016 case USE_REG_CR0:
1017 *pValue = pVCpu->cpum.s.Guest.cr0;
1018 break;
1019 case USE_REG_CR2:
1020 *pValue = pVCpu->cpum.s.Guest.cr2;
1021 break;
1022 case USE_REG_CR3:
1023 *pValue = pVCpu->cpum.s.Guest.cr3;
1024 break;
1025 case USE_REG_CR4:
1026 *pValue = pVCpu->cpum.s.Guest.cr4;
1027 break;
1028 default:
1029 return VERR_INVALID_PARAMETER;
1030 }
1031 return VINF_SUCCESS;
1032}
1033
1034
1035VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1036{
1037 return pVCpu->cpum.s.Guest.dr[0];
1038}
1039
1040
1041VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1042{
1043 return pVCpu->cpum.s.Guest.dr[1];
1044}
1045
1046
1047VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1048{
1049 return pVCpu->cpum.s.Guest.dr[2];
1050}
1051
1052
1053VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1054{
1055 return pVCpu->cpum.s.Guest.dr[3];
1056}
1057
1058
1059VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1060{
1061 return pVCpu->cpum.s.Guest.dr[6];
1062}
1063
1064
1065VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1066{
1067 return pVCpu->cpum.s.Guest.dr[7];
1068}
1069
1070
1071VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1072{
1073 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1074 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1075 if (iReg == 4 || iReg == 5)
1076 iReg += 2;
1077 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1078 return VINF_SUCCESS;
1079}
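/*
 * Illustrative usage sketch, not part of the original file: DR4 and DR5 are
 * architectural aliases for DR6 and DR7 (when CR4.DE is clear), which is why
 * the accessor above remaps indexes 4 and 5.  Asking for DR5 therefore
 * returns the DR7 value:
 */
#if 0 /* example only */
    uint64_t uValue;
    int rc = CPUMGetGuestDRx(pVCpu, 5 /* alias of DR7 */, &uValue);
    AssertRC(rc);
    Assert(uValue == CPUMGetGuestDR7(pVCpu));
#endif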
1080
1081
1082VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1083{
1084 return pVCpu->cpum.s.Guest.msrEFER;
1085}
1086
1087
1088/**
1089 * Gets a CpuId leaf.
1090 *
1091 * @param pVCpu The VMCPU handle.
1092 * @param iLeaf The CPUID leaf to get.
1093 * @param pEax Where to store the EAX value.
1094 * @param pEbx Where to store the EBX value.
1095 * @param pEcx Where to store the ECX value.
1096 * @param pEdx Where to store the EDX value.
1097 */
1098VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1099{
1100 PVM pVM = pVCpu->CTX_SUFF(pVM);
1101
1102 PCCPUMCPUID pCpuId;
1103 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1104 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1105 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1106 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1107 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1108 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1109 else
1110 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1111
1112 uint32_t cCurrentCacheIndex = *pEcx;
1113
1114 *pEax = pCpuId->eax;
1115 *pEbx = pCpuId->ebx;
1116 *pEcx = pCpuId->ecx;
1117 *pEdx = pCpuId->edx;
1118
1119 if ( iLeaf == 1)
1120 {
1121 /* Bits 31-24: Initial APIC ID */
1122 Assert(pVCpu->idCpu <= 255);
1123 *pEbx |= (pVCpu->idCpu << 24);
1124 }
1125
1126 if ( iLeaf == 4
1127 && cCurrentCacheIndex < 3
1128 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1129 {
1130 uint32_t type, level, sharing, linesize,
1131 partitions, associativity, sets, cores;
1132
1133 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1134 partitions = 1;
1135 /* These are only to silence the compiler; they always get
1136 overwritten, and the compiler should be able to figure that out. */
1137 sets = associativity = sharing = level = 1;
1138 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1139 switch (cCurrentCacheIndex)
1140 {
1141 case 0:
1142 type = 1;
1143 level = 1;
1144 sharing = 1;
1145 linesize = 64;
1146 associativity = 8;
1147 sets = 64;
1148 break;
1149 case 1:
1150 level = 1;
1151 type = 2;
1152 sharing = 1;
1153 linesize = 64;
1154 associativity = 8;
1155 sets = 64;
1156 break;
1157 default: /* shut up gcc.*/
1158 AssertFailed();
1159 case 2:
1160 level = 2;
1161 type = 3;
1162 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1163 linesize = 64;
1164 associativity = 24;
1165 sets = 4096;
1166 break;
1167 }
1168
1169 *pEax |= ((cores - 1) << 26) |
1170 ((sharing - 1) << 14) |
1171 (level << 5) |
1172 1;
1173 *pEbx = (linesize - 1) |
1174 ((partitions - 1) << 12) |
1175 ((associativity - 1) << 22); /* -1 encoding */
1176 *pEcx = sets - 1;
1177 }
1178
1179 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1180}
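/*
 * Illustrative sketch, not part of the original file: the CPUID leaf 4
 * values synthesized above use the architectural "minus one" encodings.
 * Decoding them back (uEax/uEbx/uEcx being hypothetical values returned for
 * cache index 2, i.e. the shared L2 cache):
 */
#if 0 /* example only */
    uint32_t const cCores   = ((uEax >> 26) & 0x3f)  + 1;
    uint32_t const cSharing = ((uEax >> 14) & 0xfff) + 1;
    uint32_t const uLevel   =  (uEax >>  5) & 0x7;           /* 2    */
    uint32_t const cbLine   = ( uEbx        & 0xfff) + 1;    /* 64   */
    uint32_t const cParts   = ((uEbx >> 12) & 0x3ff) + 1;    /* 1    */
    uint32_t const cWays    = ((uEbx >> 22) & 0x3ff) + 1;    /* 24   */
    uint32_t const cSets    =   uEcx + 1;                    /* 4096 */
    uint32_t const cbCache  = cbLine * cParts * cWays * cSets; /* 6 MB */
#endif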
1181
1182/**
1183 * Gets the number of standard CPUID leafs.
1184 *
1185 * @returns Number of leafs.
1186 * @param pVM The VM handle.
1187 * @remark Intended for PATM.
1188 */
1189VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1190{
1191 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1192}
1193
1194
1195/**
1196 * Gets the number of extended CPUID leafs.
1197 *
1198 * @returns Number of leafs.
1199 * @param pVM The VM handle.
1200 * @remark Intended for PATM.
1201 */
1202VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1203{
1204 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1205}
1206
1207
1208/**
1209 * Gets the number of centaur CPUID leafs.
1210 *
1211 * @returns Number of leafs.
1212 * @param pVM The VM handle.
1213 * @remark Intended for PATM.
1214 */
1215VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1216{
1217 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1218}
1219
1220
1221/**
1222 * Sets a CPUID feature bit.
1223 *
1224 * @param pVM The VM Handle.
1225 * @param enmFeature The feature to set.
1226 */
1227VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1228{
1229 switch (enmFeature)
1230 {
1231 /*
1232 * Set the APIC bit in both feature masks.
1233 */
1234 case CPUMCPUIDFEATURE_APIC:
1235 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1236 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1237 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1238 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1239 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1240 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1241 break;
1242
1243 /*
1244 * Set the x2APIC bit in the standard feature mask.
1245 */
1246 case CPUMCPUIDFEATURE_X2APIC:
1247 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1248 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1249 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1250 break;
1251
1252 /*
1253 * Set the sysenter/sysexit bit in the standard feature mask.
1254 * Assumes the caller knows what it's doing! (host must support these)
1255 */
1256 case CPUMCPUIDFEATURE_SEP:
1257 {
1258 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1259 {
1260 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1261 return;
1262 }
1263
1264 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1265 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1266 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1267 break;
1268 }
1269
1270 /*
1271 * Set the syscall/sysret bit in the extended feature mask.
1272 * Assumes the caller knows what it's doing! (host must support these)
1273 */
1274 case CPUMCPUIDFEATURE_SYSCALL:
1275 {
1276 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1277 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1278 {
1279#if HC_ARCH_BITS == 32
1280 /* X86_CPUID_AMD_FEATURE_EDX_SEP is apparently not set in 32-bit mode,
1281 * even though the CPU may be capable of it in 64-bit mode.
1282 */
1283 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1284 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1285 || !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1286#endif
1287 {
1288 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1289 return;
1290 }
1291 }
1292 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1293 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1294 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1295 break;
1296 }
1297
1298 /*
1299 * Set the PAE bit in both feature masks.
1300 * Assumes the caller knows what it's doing! (host must support these)
1301 */
1302 case CPUMCPUIDFEATURE_PAE:
1303 {
1304 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1305 {
1306 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1307 return;
1308 }
1309
1310 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1311 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1312 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1313 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1314 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1315 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1316 break;
1317 }
1318
1319 /*
1320 * Set the LONG MODE bit in the extended feature mask.
1321 * Assumes the caller knows what it's doing! (host must support these)
1322 */
1323 case CPUMCPUIDFEATURE_LONG_MODE:
1324 {
1325 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1326 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1327 {
1328 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1329 return;
1330 }
1331
1332 /* Valid for both Intel and AMD. */
1333 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1334 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1335 break;
1336 }
1337
1338 /*
1339 * Set the NXE bit in the extended feature mask.
1340 * Assumes the caller knows what it's doing! (host must support these)
1341 */
1342 case CPUMCPUIDFEATURE_NXE:
1343 {
1344 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1345 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1346 {
1347 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1348 return;
1349 }
1350
1351 /* Valid for both Intel and AMD. */
1352 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1353 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1354 break;
1355 }
1356
1357 case CPUMCPUIDFEATURE_LAHF:
1358 {
1359 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1360 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1361 {
1362 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1363 return;
1364 }
1365
1366 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1367 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1368 break;
1369 }
1370
1371 case CPUMCPUIDFEATURE_PAT:
1372 {
1373 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1374 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1375 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1376 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1377 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1378 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAT\n"));
1379 break;
1380 }
1381
1382 case CPUMCPUIDFEATURE_RDTSCP:
1383 {
1384 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1385 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP))
1386 {
1387 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1388 return;
1389 }
1390
1391 /* Valid for AMD only (for now). */
1392 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
1393 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1394 break;
1395 }
1396
1397 default:
1398 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1399 break;
1400 }
1401 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1402 {
1403 PVMCPU pVCpu = &pVM->aCpus[i];
1404 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1405 }
1406}
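/*
 * Illustrative usage sketch, not part of the original file: ring-3 setup
 * code would typically use this together with the query/clear functions
 * below, e.g. to advertise PAE to the guest:
 */
#if 0 /* example only */
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    if (!CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
        LogRel(("PAE not exposed to the guest (host lacks support)\n"));
#endif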
1407
1408
1409/**
1410 * Queries a CPUID feature bit.
1411 *
1412 * @returns boolean for feature presence
1413 * @param pVM The VM Handle.
1414 * @param enmFeature The feature to query.
1415 */
1416VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1417{
1418 switch (enmFeature)
1419 {
1420 case CPUMCPUIDFEATURE_PAE:
1421 {
1422 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1423 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1424 break;
1425 }
1426
1427 case CPUMCPUIDFEATURE_RDTSCP:
1428 {
1429 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1430 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
1431 break;
1432 }
1433
1434 case CPUMCPUIDFEATURE_LONG_MODE:
1435 {
1436 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1437 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
1438 break;
1439 }
1440
1441 default:
1442 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1443 break;
1444 }
1445 return false;
1446}
1447
1448
1449/**
1450 * Clears a CPUID feature bit.
1451 *
1452 * @param pVM The VM Handle.
1453 * @param enmFeature The feature to clear.
1454 */
1455VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1456{
1457 switch (enmFeature)
1458 {
1459 /*
1460 * Set the APIC bit in both feature masks.
1461 */
1462 case CPUMCPUIDFEATURE_APIC:
1463 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1464 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1465 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1466 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1467 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1468 Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n"));
1469 break;
1470
1471 /*
1472 * Clear the x2APIC bit in the standard feature mask.
1473 */
1474 case CPUMCPUIDFEATURE_X2APIC:
1475 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1476 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1477 LogRel(("CPUMClearGuestCpuIdFeature: Disabled x2APIC\n"));
1478 break;
1479
1480 case CPUMCPUIDFEATURE_PAE:
1481 {
1482 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1483 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1484 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1485 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1486 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1487 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1488 break;
1489 }
1490
1491 case CPUMCPUIDFEATURE_PAT:
1492 {
1493 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1494 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1495 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1496 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1497 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1498 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1499 break;
1500 }
1501
1502 case CPUMCPUIDFEATURE_LONG_MODE:
1503 {
1504 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1505 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1506 break;
1507 }
1508
1509 case CPUMCPUIDFEATURE_LAHF:
1510 {
1511 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1512 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1513 break;
1514 }
1515
1516 default:
1517 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1518 break;
1519 }
1520 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1521 {
1522 PVMCPU pVCpu = &pVM->aCpus[i];
1523 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1524 }
1525}
1526
1527
1528/**
1529 * Gets the host CPU vendor
1530 *
1531 * @returns CPU vendor
1532 * @param pVM The VM handle.
1533 */
1534VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1535{
1536 return pVM->cpum.s.enmHostCpuVendor;
1537}
1538
1539/**
1540 * Gets the CPU vendor
1541 *
1542 * @returns CPU vendor
1543 * @param pVM The VM handle.
1544 */
1545VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1546{
1547 return pVM->cpum.s.enmGuestCpuVendor;
1548}
1549
1550
1551VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1552{
1553 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1554 return CPUMRecalcHyperDRx(pVCpu);
1555}
1556
1557
1558VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1559{
1560 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1561 return CPUMRecalcHyperDRx(pVCpu);
1562}
1563
1564
1565VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1566{
1567 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1568 return CPUMRecalcHyperDRx(pVCpu);
1569}
1570
1571
1572VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1573{
1574 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1575 return CPUMRecalcHyperDRx(pVCpu);
1576}
1577
1578
1579VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1580{
1581 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1582 return CPUMRecalcHyperDRx(pVCpu);
1583}
1584
1585
1586VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1587{
1588 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1589 return CPUMRecalcHyperDRx(pVCpu);
1590}
1591
1592
1593VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1594{
1595 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1596 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1597 if (iReg == 4 || iReg == 5)
1598 iReg += 2;
1599 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1600 return CPUMRecalcHyperDRx(pVCpu);
1601}
1602
1603
1604/**
1605 * Recalculates the hypervisor DRx register values based on
1606 * current guest registers and DBGF breakpoints.
1607 *
1608 * This is called whenever a guest DRx register is modified and when DBGF
1609 * sets a hardware breakpoint. In guest context this function will reload
1610 * any (hyper) DRx registers which come out with a different value.
1611 *
1612 * @returns VINF_SUCCESS.
1613 * @param pVCpu The VMCPU handle.
1614 */
1615VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
1616{
1617 PVM pVM = pVCpu->CTX_SUFF(pVM);
1618
1619 /*
1620 * Compare the DR7s first.
1621 *
1622 * We only care about the enabled flags. The GE and LE flags are always
1623 * set and we don't care if the guest doesn't set them. GD is virtualized
1624 * when we dispatch #DB, we never enable it.
1625 */
1626 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1627#ifdef CPUM_VIRTUALIZE_DRX
1628 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1629#else
1630 const RTGCUINTREG uGstDr7 = 0;
1631#endif
1632 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1633 {
1634 /*
1635 * Ok, something is enabled. Recalc each of the breakpoints.
1636 * Straightforward code, not optimized/minimized in any way.
1637 */
1638 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1639
1640 /* bp 0 */
1641 RTGCUINTREG uNewDr0;
1642 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1643 {
1644 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1645 uNewDr0 = DBGFBpGetDR0(pVM);
1646 }
1647 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1648 {
1649 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1650 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1651 }
1652 else
1653 uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
1654
1655 /* bp 1 */
1656 RTGCUINTREG uNewDr1;
1657 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1658 {
1659 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1660 uNewDr1 = DBGFBpGetDR1(pVM);
1661 }
1662 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1663 {
1664 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1665 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1666 }
1667 else
1668 uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
1669
1670 /* bp 2 */
1671 RTGCUINTREG uNewDr2;
1672 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1673 {
1674 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1675 uNewDr2 = DBGFBpGetDR2(pVM);
1676 }
1677 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1678 {
1679 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1680 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1681 }
1682 else
1683 uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
1684
1685 /* bp 3 */
1686 RTGCUINTREG uNewDr3;
1687 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1688 {
1689 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1690 uNewDr3 = DBGFBpGetDR3(pVM);
1691 }
1692 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1693 {
1694 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1695 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1696 }
1697 else
1698 uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
1699
1700 /*
1701 * Apply the updates.
1702 */
1703#ifdef IN_RC
1704 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
1705 {
1706 /** @todo save host DBx registers. */
1707 }
1708#endif
1709 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
1710 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1711 CPUMSetHyperDR3(pVCpu, uNewDr3);
1712 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1713 CPUMSetHyperDR2(pVCpu, uNewDr2);
1714 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1715 CPUMSetHyperDR1(pVCpu, uNewDr1);
1716 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1717 CPUMSetHyperDR0(pVCpu, uNewDr0);
1718 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1719 CPUMSetHyperDR7(pVCpu, uNewDr7);
1720 }
1721 else
1722 {
1723#ifdef IN_RC
1724 if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
1725 {
1726 /** @todo restore host DBx registers. */
1727 }
1728#endif
1729 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1730 }
1731 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1732 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1733 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1734 pVCpu->cpum.s.Hyper.dr[7]));
1735
1736 return VINF_SUCCESS;
1737}
1738
1739
1740/**
1741 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1742 *
1743 * @returns true if NXE is enabled, otherwise false.
1744 * @param pVCpu The virtual CPU handle.
1745 */
1746VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
1747{
1748 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1749}
1750
1751
1752/**
1753 * Tests if the guest has the Page Size Extension enabled (PSE).
1754 *
1755 * @returns true if PSE is enabled, otherwise false.
1756 * @param pVCpu The virtual CPU handle.
1757 */
1758VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
1759{
1760 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1761 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1762}
1763
1764
1765/**
1766 * Tests if the guest has the paging enabled (PG).
1767 *
1768 * @returns true if paging is enabled, otherwise false.
1769 * @param pVCpu The virtual CPU handle.
1770 */
1771VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
1772{
1773 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1774}
1775
1776
1777/**
1778 * Tests if the guest has ring-0 write protection enabled (WP).
1779 *
1780 * @returns true if write protection is enabled, otherwise false.
1781 * @param pVCpu The virtual CPU handle.
1782 */
1783VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
1784{
1785 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1786}
1787
1788
1789/**
1790 * Tests if the guest is running in real mode or not.
1791 *
1792 * @returns true if in real mode, otherwise false.
1793 * @param pVCpu The virtual CPU handle.
1794 */
1795VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
1796{
1797 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1798}
1799
1800
1801/**
1802 * Tests if the guest is running in protected mode or not.
1803 *
1804 * @returns true if in protected mode, otherwise false.
1805 * @param pVCpu The virtual CPU handle.
1806 */
1807VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
1808{
1809 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1810}
1811
1812
1813/**
1814 * Tests if the guest is running in paged protected mode or not.
1815 *
1816 * @returns true if in paged protected mode, otherwise false.
1817 * @param pVCpu The virtual CPU handle.
1818 */
1819VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
1820{
1821 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1822}
1823
1824
1825/**
1826 * Tests if the guest is running in long mode or not.
1827 *
1828 * @returns true if in long mode, otherwise false.
1829 * @param pVCpu The virtual CPU handle.
1830 */
1831VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
1832{
1833 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1834}
1835
1836
1837/**
1838 * Tests if the guest is running in PAE mode or not.
1839 *
1840 * @returns true if in PAE mode, otherwise false.
1841 * @param pVCpu The virtual CPU handle.
1842 */
1843VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
1844{
1845 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1846 && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
1847 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1848}
1849
1850
1851
1852#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
1853
1854/**
1855 * Transforms the guest CPU state to raw-ring mode.
1856 *
1857 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
1858 *
1859 * @returns VBox status. (recompiler failure)
1860 * @param pVCpu The VMCPU handle.
1861 * @param pCtxCore The context core (for trap usage).
1862 * @see @ref pg_raw
1863 */
1864VMMDECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
1865{
1866 PVM pVM = pVCpu->CTX_SUFF(pVM);
1867
1868 Assert(!pVM->cpum.s.fRawEntered);
1869 if (!pCtxCore)
1870 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
1871
1872 /*
1873 * Are we in Ring-0?
1874 */
1875 if ( pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
1876 && !pCtxCore->eflags.Bits.u1VM)
1877 {
1878 /*
1879 * Enter execution mode.
1880 */
1881 PATMRawEnter(pVM, pCtxCore);
1882
1883 /*
1884 * Set CPL to Ring-1.
1885 */
1886 pCtxCore->ss |= 1;
1887 if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
1888 pCtxCore->cs |= 1;
1889 }
1890 else
1891 {
1892 AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
1893 ("ring-1 code not supported\n"));
1894 /*
1895 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
1896 */
1897 PATMRawEnter(pVM, pCtxCore);
1898 }
1899
1900 /*
1901 * Assert sanity.
1902 */
1903 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
1904 AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
1905 || pCtxCore->eflags.Bits.u1VM,
1906 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1907 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
1908 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
1909
1910 pVM->cpum.s.fRawEntered = true;
1911 return VINF_SUCCESS;
1912}
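/*
 * Illustrative sketch, not part of the original file: the ring compression
 * above only touches the RPL bits of the selector values; CPUMRawLeave()
 * undoes it.  For a hypothetical ring-0 guest stack selector:
 */
#if 0 /* example only */
    uint16_t uSs = 0x0010;      /* guest ring-0 selector                */
    uSs |= 1;                   /* CPUMRawEnter: RPL 0 -> 1 (0x0011)    */
    uSs &= ~X86_SEL_RPL;        /* CPUMRawLeave: back to RPL 0 (0x0010) */
#endif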
1913
1914
1915/**
1916 * Transforms the guest CPU state from raw-ring mode to correct values.
1917 *
1918 * This function will change any selector registers with DPL=1 to DPL=0.
1919 *
1920 * @returns Adjusted rc.
1921 * @param pVCpu The VMCPU handle.
1922 * @param rc Raw mode return code
1923 * @param pCtxCore The context core (for trap usage).
1924 * @see @ref pg_raw
1925 */
1926VMMDECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
1927{
1928 PVM pVM = pVCpu->CTX_SUFF(pVM);
1929
1930 /*
1931 * Don't leave if we've already left (in GC).
1932 */
1933 Assert(pVM->cpum.s.fRawEntered);
1934 if (!pVM->cpum.s.fRawEntered)
1935 return rc;
1936 pVM->cpum.s.fRawEntered = false;
1937
1938 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1939 if (!pCtxCore)
1940 pCtxCore = CPUMCTX2CORE(pCtx);
1941 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
1942 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
1943 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1944
1945 /*
1946 * Are we executing in raw ring-1?
1947 */
1948 if ( (pCtxCore->ss & X86_SEL_RPL) == 1
1949 && !pCtxCore->eflags.Bits.u1VM)
1950 {
1951 /*
1952 * Leave execution mode.
1953 */
1954 PATMRawLeave(pVM, pCtxCore, rc);
1955 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
1956 /** @todo See what happens if we remove this. */
1957 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1958 pCtxCore->ds &= ~X86_SEL_RPL;
1959 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1960 pCtxCore->es &= ~X86_SEL_RPL;
1961 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1962 pCtxCore->fs &= ~X86_SEL_RPL;
1963 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1964 pCtxCore->gs &= ~X86_SEL_RPL;
1965
1966 /*
1967 * Ring-1 selector => Ring-0.
1968 */
1969 pCtxCore->ss &= ~X86_SEL_RPL;
1970 if ((pCtxCore->cs & X86_SEL_RPL) == 1)
1971 pCtxCore->cs &= ~X86_SEL_RPL;
1972 }
1973 else
1974 {
1975 /*
1976 * PATM is taking care of the IOPL and IF flags for us.
1977 */
1978 PATMRawLeave(pVM, pCtxCore, rc);
1979 if (!pCtxCore->eflags.Bits.u1VM)
1980 {
1981 /** @todo See what happens if we remove this. */
1982 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1983 pCtxCore->ds &= ~X86_SEL_RPL;
1984 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1985 pCtxCore->es &= ~X86_SEL_RPL;
1986 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1987 pCtxCore->fs &= ~X86_SEL_RPL;
1988 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1989 pCtxCore->gs &= ~X86_SEL_RPL;
1990 }
1991 }
1992
1993 return rc;
1994}
1995
1996/**
1997 * Updates the EFLAGS while we're in raw-mode.
1998 *
1999 * @param pVCpu The VMCPU handle.
2000 * @param pCtxCore The context core.
2001 * @param eflags The new EFLAGS value.
2002 */
2003VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t eflags)
2004{
2005 PVM pVM = pVCpu->CTX_SUFF(pVM);
2006
2007 if (!pVM->cpum.s.fRawEntered)
2008 {
2009 pCtxCore->eflags.u32 = eflags;
2010 return;
2011 }
2012 PATMRawSetEFlags(pVM, pCtxCore, eflags);
2013}
2014
2015#endif /* !IN_RING0 */
2016
2017/**
2018 * Gets the EFLAGS while we're in raw-mode.
2019 *
2020 * @returns The eflags.
2021 * @param pVCpu The VMCPU handle.
2022 * @param pCtxCore The context core.
2023 */
2024VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2025{
2026#ifdef IN_RING0
2027 return pCtxCore->eflags.u32;
2028#else
2029 PVM pVM = pVCpu->CTX_SUFF(pVM);
2030
2031 if (!pVM->cpum.s.fRawEntered)
2032 return pCtxCore->eflags.u32;
2033 return PATMRawGetEFlags(pVM, pCtxCore);
2034#endif
2035}
2036
2037
2038/**
2039 * Gets and resets the changed flags (CPUM_CHANGED_*).
2040 * Only REM should call this function.
2041 *
2042 * @returns The changed flags.
2043 * @param pVCpu The VMCPU handle.
2044 */
2045VMMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVMCPU pVCpu)
2046{
2047 unsigned fFlags = pVCpu->cpum.s.fChanged;
2048 pVCpu->cpum.s.fChanged = 0;
2049 /** @todo change the switcher to use the fChanged flags. */
2050 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
2051 {
2052 fFlags |= CPUM_CHANGED_FPU_REM;
2053 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
2054 }
2055 return fFlags;
2056}
2057
2058
2059/**
2060 * Sets the specified changed flags (CPUM_CHANGED_*).
2061 *
2062 * @param pVCpu The VMCPU handle.
2063 */
2064VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2065{
2066 pVCpu->cpum.s.fChanged |= fChangedFlags;
2067}
2068
2069
2070/**
2071 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
2072 * @returns true if supported.
2073 * @returns false if not supported.
2074 * @param pVM The VM handle.
2075 */
2076VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2077{
2078 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2079}
2080
2081
2082/**
2083 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2084 * @returns true if used.
2085 * @returns false if not used.
2086 * @param pVM The VM handle.
2087 */
2088VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2089{
2090 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
2091}
2092
2093
2094/**
2095 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2096 * @returns true if used.
2097 * @returns false if not used.
2098 * @param pVM The VM handle.
2099 */
2100VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2101{
2102 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
2103}
2104
2105#ifndef IN_RING3
2106
2107/**
2108 * Lazily sync in the FPU/XMM state
2109 *
2110 * @returns VBox status code.
2111 * @param pVCpu VMCPU handle
2112 */
2113VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2114{
2115 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2116}
2117
2118#endif /* !IN_RING3 */
2119
2120/**
2121 * Checks if we activated the FPU/XMM state of the guest OS
2122 * @returns true if we did.
2123 * @returns false if not.
2124 * @param pVCpu The VMCPU handle.
2125 */
2126VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2127{
2128 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2129}
2130
2131
2132/**
2133 * Deactivate the FPU/XMM state of the guest OS
2134 * @param pVCpu The VMCPU handle.
2135 */
2136VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2137{
2138 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2139}
2140
2141
2142/**
2143 * Checks if the guest debug state is active
2144 *
2145 * @returns boolean
2146 * @param pVCpu The VMCPU handle.
2147 */
2148VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2149{
2150 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2151}
2152
2153/**
2154 * Checks if the hyper debug state is active
2155 *
2156 * @returns boolean
2157 * @param pVCpu The VMCPU handle.
2158 */
2159VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2160{
2161 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
2162}
2163
2164
2165/**
2166 * Mark the guest's debug state as inactive
2167 *
2168 * @param pVCpu The VMCPU handle.
2170 */
2171VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2172{
2173 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2174}
2175
2176
2177/**
2178 * Mark the hypervisor's debug state as inactive
2179 *
2180 * @param pVCpu The VMCPU handle.
2182 */
2183VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
2184{
2185 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2186}
2187
2188/**
2189 * Checks if the hidden selector registers are valid
2190 * @returns true if they are.
2191 * @returns false if not.
2192 * @param pVM The VM handle.
2193 */
2194VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
2195{
2196 return HWACCMIsEnabled(pVM);
2197}
2198
2199
2200
2201/**
2202 * Get the current privilege level of the guest.
2203 *
2204 * @returns cpl
2205 * @param pVM VM Handle.
2206 * @param pRegFrame Trap register frame.
2207 */
2208VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2209{
2210 uint32_t cpl;
2211
2212 if (CPUMAreHiddenSelRegsValid(pVCpu->CTX_SUFF(pVM)))
2213 {
2214 /*
2215 * The hidden CS.DPL register is always equal to the CPL; it is
2216 * not affected by loading a conforming code segment.
2217 *
2218 * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
2219 * at SS. (ACP2 regression during install after a far call to ring 2)
2220 */
2221 if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2222 cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
2223 else
2224 cpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2225 }
2226 else if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2227 {
2228 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
2229 {
2230 /*
2231 * The SS RPL is always equal to the CPL, while the CS RPL
2232 * isn't necessarily equal if the segment is conforming.
2233 * See section 4.11.1 in the AMD manual.
2234 */
2235 cpl = (pCtxCore->ss & X86_SEL_RPL);
2236#ifndef IN_RING0
2237 if (cpl == 1)
2238 cpl = 0;
2239#endif
2240 }
2241 else
2242 cpl = 3;
2243 }
2244 else
2245 cpl = 0; /* real mode; cpl is zero */
2246
2247 return cpl;
2248}
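/*
 * Illustrative sketch, not part of the original file: without valid hidden
 * selector registers the CPL computed above comes from the SS selector RPL,
 * with special cases for real mode and V8086 mode (and ignoring the
 * raw-mode ring-1 folding).  With hypothetical fPE/fV86/uSs inputs:
 */
#if 0 /* example only */
    uint32_t uCpl;
    if (!fPE)
        uCpl = 0;                   /* real mode                     */
    else if (fV86)
        uCpl = 3;                   /* V8086 code always runs at 3   */
    else
        uCpl = uSs & X86_SEL_RPL;   /* protected mode: SS.RPL == CPL */
#endif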
2249
2250
2251/**
2252 * Gets the current guest CPU mode.
2253 *
2254 * If paging mode is what you need, check out PGMGetGuestMode().
2255 *
2256 * @returns The CPU mode.
2257 * @param pVCpu The VMCPU handle.
2258 */
2259VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2260{
2261 CPUMMODE enmMode;
2262 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2263 enmMode = CPUMMODE_REAL;
2264 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2265 enmMode = CPUMMODE_PROTECTED;
2266 else
2267 enmMode = CPUMMODE_LONG;
2268
2269 return enmMode;
2270}