VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@41931

Last change on this file was r41931, checked in by vboxsync, 12 years ago

TRPM: Save state directly to the CPUMCPU context member instead of putting it on the stack. This avoids copying the state around before returning to host context to service an IRQ, or before using IEM.

1/* $Id: CPUMAllRegs.cpp 41931 2012-06-27 16:12:16Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include "CPUMInternal.h"
30#include <VBox/vmm/vm.h>
31#include <VBox/err.h>
32#include <VBox/dis.h>
33#include <VBox/log.h>
34#include <VBox/vmm/hwaccm.h>
35#include <VBox/vmm/tm.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-amd64-x86.h>
39#ifdef IN_RING3
40#include <iprt/thread.h>
41#endif
42
43/** Disable stack frame pointer generation here. */
44#if defined(_MSC_VER) && !defined(DEBUG)
45# pragma optimize("y", off)
46#endif
47
48
49/**
50 * Obsolete.
51 *
52 * We don't support nested hypervisor context interrupts or traps. Life is much
53 * simpler when we don't. It's also slightly faster at times.
54 *
55 * @param pVCpu Pointer to the virtual CPU.
56 */
57VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
58{
59 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
60}
61
62
63/**
64 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
65 *
66 * @param pVCpu Pointer to the virtual CPU.
67 */
68VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
69{
70 return &pVCpu->cpum.s.Hyper;
71}
72
73
74VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
75{
76 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
77 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
78}
79
80
81VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
82{
83 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
84 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
85}
86
87
88VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
89{
90 pVCpu->cpum.s.Hyper.cr3 = cr3;
91
92#ifdef IN_RC
93 /* Update the current CR3. */
94 ASMSetCR3(cr3);
95#endif
96}
97
98VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
99{
100 return pVCpu->cpum.s.Hyper.cr3;
101}
102
103
104VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
105{
106 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
107}
108
109
110VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
111{
112 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
113}
114
115
116VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
117{
118 pVCpu->cpum.s.Hyper.es.Sel = SelES;
119}
120
121
122VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
123{
124 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
125}
126
127
128VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
129{
130 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
131}
132
133
134VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
135{
136 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
137}
138
139
140VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
141{
142 pVCpu->cpum.s.Hyper.esp = u32ESP;
143}
144
145
146VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
147{
148 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
149 return VINF_SUCCESS;
150}
151
152
153VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
154{
155 pVCpu->cpum.s.Hyper.eip = u32EIP;
156}
157
158
159VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
160{
161 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
162}
163
164
165VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
166{
167 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
168}
169
170
171VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
172{
173 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
174 /** @todo in GC we must load it! */
175}
176
177
178VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
179{
180 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
181 /** @todo in GC we must load it! */
182}
183
184
185VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
186{
187 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
188 /** @todo in GC we must load it! */
189}
190
191
192VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
193{
194 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
195 /** @todo in GC we must load it! */
196}
197
198
199VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
200{
201 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
202 /** @todo in GC we must load it! */
203}
204
205
206VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
207{
208 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
209 /** @todo in GC we must load it! */
210}
211
212
213VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
214{
215 return pVCpu->cpum.s.Hyper.cs.Sel;
216}
217
218
219VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
220{
221 return pVCpu->cpum.s.Hyper.ds.Sel;
222}
223
224
225VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
226{
227 return pVCpu->cpum.s.Hyper.es.Sel;
228}
229
230
231VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
232{
233 return pVCpu->cpum.s.Hyper.fs.Sel;
234}
235
236
237VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
238{
239 return pVCpu->cpum.s.Hyper.gs.Sel;
240}
241
242
243VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
244{
245 return pVCpu->cpum.s.Hyper.ss.Sel;
246}
247
248
249VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
250{
251 return pVCpu->cpum.s.Hyper.eax;
252}
253
254
255VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
256{
257 return pVCpu->cpum.s.Hyper.ebx;
258}
259
260
261VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
262{
263 return pVCpu->cpum.s.Hyper.ecx;
264}
265
266
267VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
268{
269 return pVCpu->cpum.s.Hyper.edx;
270}
271
272
273VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
274{
275 return pVCpu->cpum.s.Hyper.esi;
276}
277
278
279VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
280{
281 return pVCpu->cpum.s.Hyper.edi;
282}
283
284
285VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
286{
287 return pVCpu->cpum.s.Hyper.ebp;
288}
289
290
291VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
292{
293 return pVCpu->cpum.s.Hyper.esp;
294}
295
296
297VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
298{
299 return pVCpu->cpum.s.Hyper.eflags.u32;
300}
301
302
303VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
304{
305 return pVCpu->cpum.s.Hyper.eip;
306}
307
308
309VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
310{
311 return pVCpu->cpum.s.Hyper.rip;
312}
313
314
315VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
316{
317 if (pcbLimit)
318 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
319 return pVCpu->cpum.s.Hyper.idtr.pIdt;
320}
321
322
323VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
324{
325 if (pcbLimit)
326 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
327 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
328}
329
330
331VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
332{
333 return pVCpu->cpum.s.Hyper.ldtr.Sel;
334}
335
336
337VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
338{
339 return pVCpu->cpum.s.Hyper.dr[0];
340}
341
342
343VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
344{
345 return pVCpu->cpum.s.Hyper.dr[1];
346}
347
348
349VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
350{
351 return pVCpu->cpum.s.Hyper.dr[2];
352}
353
354
355VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
356{
357 return pVCpu->cpum.s.Hyper.dr[3];
358}
359
360
361VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
362{
363 return pVCpu->cpum.s.Hyper.dr[6];
364}
365
366
367VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
368{
369 return pVCpu->cpum.s.Hyper.dr[7];
370}
371
372
373/**
374 * Gets the pointer to the internal CPUMCTXCORE structure.
375 * This is only for reading in order to save a few calls.
376 *
377 * @param pVCpu Handle to the virtual cpu.
378 */
379VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
380{
381 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
382}
383
384
385/**
386 * Sets the guest context core registers.
387 *
388 * @param pVCpu Handle to the virtual cpu.
389 * @param pCtxCore The new context core values.
390 */
391VMMDECL(void) CPUMSetGuestCtxCore(PVMCPU pVCpu, PCCPUMCTXCORE pCtxCore)
392{
393 /** @todo #1410 requires selectors to be checked. (huh? 1410?) */
394
395 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
396 *pCtxCoreDst = *pCtxCore;
397
398 /* Mask away invalid parts of the cpu context. */
399 if (!CPUMIsGuestInLongMode(pVCpu))
400 {
401 uint64_t u64Mask = UINT64_C(0xffffffff);
402
403 pCtxCoreDst->rip &= u64Mask;
404 pCtxCoreDst->rax &= u64Mask;
405 pCtxCoreDst->rbx &= u64Mask;
406 pCtxCoreDst->rcx &= u64Mask;
407 pCtxCoreDst->rdx &= u64Mask;
408 pCtxCoreDst->rsi &= u64Mask;
409 pCtxCoreDst->rdi &= u64Mask;
410 pCtxCoreDst->rbp &= u64Mask;
411 pCtxCoreDst->rsp &= u64Mask;
412 pCtxCoreDst->rflags.u &= u64Mask;
413
414 pCtxCoreDst->r8 = 0;
415 pCtxCoreDst->r9 = 0;
416 pCtxCoreDst->r10 = 0;
417 pCtxCoreDst->r11 = 0;
418 pCtxCoreDst->r12 = 0;
419 pCtxCoreDst->r13 = 0;
420 pCtxCoreDst->r14 = 0;
421 pCtxCoreDst->r15 = 0;
422 }
423}
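
For illustration only (editor's sketch, not part of the original file): a minimal caller of the read/write pair above. It assumes the headers included at the top of the file and a valid pVCpu; the Example* name and the instruction-skipping scenario are invented for the sketch.

static void ExampleAdvanceGuestRip(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* Take a writable copy of the read-only context core and push it back. */
    CPUMCTXCORE CtxCore = *CPUMGetGuestCtxCore(pVCpu);
    CtxCore.rip += cbInstr;                   /* e.g. skip an emulated instruction */
    CPUMSetGuestCtxCore(pVCpu, &CtxCore);     /* masks away the 64-bit parts when not in long mode */
}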
424
425
426/**
427 * Queries the pointer to the internal CPUMCTX structure
428 *
429 * @returns The CPUMCTX pointer.
430 * @param pVCpu Handle to the virtual cpu.
431 */
432VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
433{
434 return &pVCpu->cpum.s.Guest;
435}
436
437VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
438{
439 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
440 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
441 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
442 return VINF_SUCCESS;
443}
444
445VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
446{
447 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
448 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
449 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
450 return VINF_SUCCESS;
451}
452
453VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
454{
455 pVCpu->cpum.s.Guest.tr.Sel = tr;
456 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
457 return VINF_SUCCESS;
458}
459
460VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
461{
462 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
463 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
464 return VINF_SUCCESS;
465}
466
467
468/**
469 * Set the guest CR0.
470 *
471 * When called in GC, the hyper CR0 may be updated if that is
472 * required. The caller only has to take special action if AM,
473 * WP, PG or PE changes.
474 *
475 * @returns VINF_SUCCESS (consider it void).
476 * @param pVCpu Handle to the virtual cpu.
477 * @param cr0 The new CR0 value.
478 */
479VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
480{
481#ifdef IN_RC
482 /*
483 * Check if we need to change hypervisor CR0 because
484 * of math stuff.
485 */
486 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
487 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
488 {
489 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
490 {
491 /*
492 * We haven't saved the host FPU state yet, so TS and MT are both set
493 * and EM should be reflecting the guest EM (it always does this).
494 */
495 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
496 {
497 uint32_t HyperCR0 = ASMGetCR0();
498 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
499 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
500 HyperCR0 &= ~X86_CR0_EM;
501 HyperCR0 |= cr0 & X86_CR0_EM;
502 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
503 ASMSetCR0(HyperCR0);
504 }
505# ifdef VBOX_STRICT
506 else
507 {
508 uint32_t HyperCR0 = ASMGetCR0();
509 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
510 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
511 }
512# endif
513 }
514 else
515 {
516 /*
517 * Already saved the state, so we're just mirroring
518 * the guest flags.
519 */
520 uint32_t HyperCR0 = ASMGetCR0();
521 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
522 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
523 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
524 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
525 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
526 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
527 ASMSetCR0(HyperCR0);
528 }
529 }
530#endif /* IN_RC */
531
532 /*
533 * Check for changes causing TLB flushes (for REM).
534 * The caller is responsible for calling PGM when appropriate.
535 */
536 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
537 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
538 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
539 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
540
541 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
542 return VINF_SUCCESS;
543}
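
A hedged usage sketch (editor's illustration; the Example* name and the TS-toggle scenario are assumptions, not code from this file) showing how a caller might go through the CR0 setter above:

static void ExampleForceGuestCr0Ts(PVMCPU pVCpu)
{
    uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
    cr0 |= X86_CR0_TS;               /* e.g. arm a #NM on the next FPU instruction */
    CPUMSetGuestCR0(pVCpu, cr0);     /* ET is forced on; CPUM_CHANGED_* flags are updated */
    /* If PG, WP or PE changed, the caller is still responsible for informing PGM (not shown). */
}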
544
545
546VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
547{
548 pVCpu->cpum.s.Guest.cr2 = cr2;
549 return VINF_SUCCESS;
550}
551
552
553VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
554{
555 pVCpu->cpum.s.Guest.cr3 = cr3;
556 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
557 return VINF_SUCCESS;
558}
559
560
561VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
562{
563 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
564 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
565 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
566 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
567 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
568 cr4 &= ~X86_CR4_OSFSXR;
569 pVCpu->cpum.s.Guest.cr4 = cr4;
570 return VINF_SUCCESS;
571}
572
573
574VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
575{
576 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
577 return VINF_SUCCESS;
578}
579
580
581VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
582{
583 pVCpu->cpum.s.Guest.eip = eip;
584 return VINF_SUCCESS;
585}
586
587
588VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
589{
590 pVCpu->cpum.s.Guest.eax = eax;
591 return VINF_SUCCESS;
592}
593
594
595VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
596{
597 pVCpu->cpum.s.Guest.ebx = ebx;
598 return VINF_SUCCESS;
599}
600
601
602VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
603{
604 pVCpu->cpum.s.Guest.ecx = ecx;
605 return VINF_SUCCESS;
606}
607
608
609VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
610{
611 pVCpu->cpum.s.Guest.edx = edx;
612 return VINF_SUCCESS;
613}
614
615
616VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
617{
618 pVCpu->cpum.s.Guest.esp = esp;
619 return VINF_SUCCESS;
620}
621
622
623VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
624{
625 pVCpu->cpum.s.Guest.ebp = ebp;
626 return VINF_SUCCESS;
627}
628
629
630VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
631{
632 pVCpu->cpum.s.Guest.esi = esi;
633 return VINF_SUCCESS;
634}
635
636
637VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
638{
639 pVCpu->cpum.s.Guest.edi = edi;
640 return VINF_SUCCESS;
641}
642
643
644VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
645{
646 pVCpu->cpum.s.Guest.ss.Sel = ss;
647 return VINF_SUCCESS;
648}
649
650
651VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
652{
653 pVCpu->cpum.s.Guest.cs.Sel = cs;
654 return VINF_SUCCESS;
655}
656
657
658VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
659{
660 pVCpu->cpum.s.Guest.ds.Sel = ds;
661 return VINF_SUCCESS;
662}
663
664
665VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
666{
667 pVCpu->cpum.s.Guest.es.Sel = es;
668 return VINF_SUCCESS;
669}
670
671
672VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
673{
674 pVCpu->cpum.s.Guest.fs.Sel = fs;
675 return VINF_SUCCESS;
676}
677
678
679VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
680{
681 pVCpu->cpum.s.Guest.gs.Sel = gs;
682 return VINF_SUCCESS;
683}
684
685
686VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
687{
688 pVCpu->cpum.s.Guest.msrEFER = val;
689}
690
691
692/**
693 * Query an MSR.
694 *
695 * The caller is responsible for checking privilege if the call is the result
696 * of a RDMSR instruction. We'll do the rest.
697 *
698 * @retval VINF_SUCCESS on success.
699 * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
700 * expected to take the appropriate actions. @a *puValue is set to 0.
701 * @param pVCpu Pointer to the VMCPU.
702 * @param idMsr The MSR.
703 * @param puValue Where to return the value.
704 *
705 * @remarks This will always return the right values, even when we're in the
706 * recompiler.
707 */
708VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
709{
710 /*
711 * If we don't indicate MSR support in the CPUID feature bits, indicate
712 * that a #GP(0) should be raised.
713 */
714 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
715 {
716 *puValue = 0;
717 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
718 }
719
720 int rc = VINF_SUCCESS;
721 uint8_t const u8Multiplier = 4;
722 switch (idMsr)
723 {
724 case MSR_IA32_TSC:
725 *puValue = TMCpuTickGet(pVCpu);
726 break;
727
728 case MSR_IA32_APICBASE:
729 rc = PDMApicGetBase(pVCpu->CTX_SUFF(pVM), puValue);
730 if (RT_SUCCESS(rc))
731 rc = VINF_SUCCESS;
732 else
733 {
734 *puValue = 0;
735 rc = VERR_CPUM_RAISE_GP_0;
736 }
737 break;
738
739 case MSR_IA32_CR_PAT:
740 *puValue = pVCpu->cpum.s.Guest.msrPAT;
741 break;
742
743 case MSR_IA32_SYSENTER_CS:
744 *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
745 break;
746
747 case MSR_IA32_SYSENTER_EIP:
748 *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
749 break;
750
751 case MSR_IA32_SYSENTER_ESP:
752 *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
753 break;
754
755 case MSR_IA32_MTRR_CAP:
756 {
757 /* This is currently a bit weird. :-) */
758 uint8_t const cVariableRangeRegs = 0;
759 bool const fSystemManagementRangeRegisters = false;
760 bool const fFixedRangeRegisters = false;
761 bool const fWriteCombiningType = false;
762 *puValue = cVariableRangeRegs
763 | (fFixedRangeRegisters ? RT_BIT_64(8) : 0)
764 | (fWriteCombiningType ? RT_BIT_64(10) : 0)
765 | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
766 break;
767 }
768
769 case MSR_IA32_MTRR_DEF_TYPE:
770 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
771 break;
772
773 case IA32_MTRR_FIX64K_00000:
774 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000;
775 break;
776 case IA32_MTRR_FIX16K_80000:
777 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000;
778 break;
779 case IA32_MTRR_FIX16K_A0000:
780 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000;
781 break;
782 case IA32_MTRR_FIX4K_C0000:
783 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000;
784 break;
785 case IA32_MTRR_FIX4K_C8000:
786 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000;
787 break;
788 case IA32_MTRR_FIX4K_D0000:
789 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000;
790 break;
791 case IA32_MTRR_FIX4K_D8000:
792 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000;
793 break;
794 case IA32_MTRR_FIX4K_E0000:
795 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000;
796 break;
797 case IA32_MTRR_FIX4K_E8000:
798 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000;
799 break;
800 case IA32_MTRR_FIX4K_F0000:
801 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000;
802 break;
803 case IA32_MTRR_FIX4K_F8000:
804 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000;
805 break;
806
807 case MSR_K6_EFER:
808 *puValue = pVCpu->cpum.s.Guest.msrEFER;
809 break;
810
811 case MSR_K8_SF_MASK:
812 *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
813 break;
814
815 case MSR_K6_STAR:
816 *puValue = pVCpu->cpum.s.Guest.msrSTAR;
817 break;
818
819 case MSR_K8_LSTAR:
820 *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
821 break;
822
823 case MSR_K8_CSTAR:
824 *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
825 break;
826
827 case MSR_K8_FS_BASE:
828 *puValue = pVCpu->cpum.s.Guest.fs.u64Base;
829 break;
830
831 case MSR_K8_GS_BASE:
832 *puValue = pVCpu->cpum.s.Guest.gs.u64Base;
833 break;
834
835 case MSR_K8_KERNEL_GS_BASE:
836 *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
837 break;
838
839 case MSR_K8_TSC_AUX:
840 *puValue = pVCpu->cpum.s.GuestMsrs.msr.TscAux;
841 break;
842
843 case MSR_IA32_PERF_STATUS:
844 /** @todo This may not be exactly correct; maybe use the host's values instead. */
845 *puValue = UINT64_C(1000) /* TSC increment by tick */
846 | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
847 | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
848 break;
849
850 case MSR_IA32_FSB_CLOCK_STS:
851 /*
852 * Encoded FSB frequency values:
853 * 0 - 266 MHz
854 * 1 - 133 MHz
855 * 2 - 200 MHz
856 * 3 - 166 MHz
857 * 5 - 100 MHz
858 */
859 *puValue = (2 << 4);
860 break;
861
862 case MSR_IA32_PLATFORM_INFO:
863 *puValue = (u8Multiplier << 8) /* Flex ratio max */
864 | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
865 break;
866
867 case MSR_IA32_THERM_STATUS:
868 /* CPU temperature relative to TCC; to actually activate this, CPUID leaf 6 EAX[0] must be set. */
869 *puValue = RT_BIT(31) /* validity bit */
870 | (UINT64_C(20) << 16) /* degrees till TCC */;
871 break;
872
873 case MSR_IA32_MISC_ENABLE:
874#if 0
875 /* Needs to be tested more before enabling. */
876 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
877#else
878 /* Currently we don't allow guests to modify enable MSRs. */
879 *puValue = MSR_IA32_MISC_ENABLE_FAST_STRINGS /* by default */;
880
881 if ((pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR) != 0)
882
883 *puValue |= MSR_IA32_MISC_ENABLE_MONITOR /* if mwait/monitor available */;
884 /** @todo: add more cpuid-controlled features this way. */
885#endif
886 break;
887
888#if 0 /*def IN_RING0 */
889 case MSR_IA32_PLATFORM_ID:
890 case MSR_IA32_BIOS_SIGN_ID:
891 if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
892 {
893 /* Available since the P6 family. VT-x implies that this feature is present. */
894 if (idMsr == MSR_IA32_PLATFORM_ID)
895 *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
896 else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
897 *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
898 break;
899 }
900 /* no break */
901#endif
902
903 default:
904 /* In X2APIC specification this range is reserved for APIC control. */
905 if ( idMsr >= MSR_IA32_APIC_START
906 && idMsr < MSR_IA32_APIC_END)
907 {
908 rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
909 if (RT_SUCCESS(rc))
910 rc = VINF_SUCCESS;
911 else
912 {
913 *puValue = 0;
914 rc = VERR_CPUM_RAISE_GP_0;
915 }
916 }
917 else
918 {
919 *puValue = 0;
920 rc = VERR_CPUM_RAISE_GP_0;
921 }
922 break;
923 }
924
925 return rc;
926}
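
As a usage illustration (editor's sketch, not part of the original file), a minimal RDMSR-style caller of the query function above might look as follows. It uses only accessors defined in this file; the ExampleEmulateRdMsr name and the surrounding dispatch are assumed.

static int ExampleEmulateRdMsr(PVMCPU pVCpu)
{
    /* Privilege (CPL 0) checking is the caller's job, per the function documentation. */
    uint32_t const idMsr = CPUMGetGuestECX(pVCpu);       /* RDMSR takes the MSR index in ECX */
    uint64_t       uValue;
    int rc = CPUMQueryGuestMsr(pVCpu, idMsr, &uValue);
    if (RT_SUCCESS(rc))
    {
        CPUMSetGuestEAX(pVCpu, (uint32_t)uValue);           /* low half  -> EAX */
        CPUMSetGuestEDX(pVCpu, (uint32_t)(uValue >> 32));   /* high half -> EDX */
    }
    /* On VERR_CPUM_RAISE_GP_0 the caller is expected to raise #GP(0) itself (not shown). */
    return rc;
}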
927
928
929/**
930 * Sets the MSR.
931 *
932 * The caller is responsible for checking privilege if the call is the result
933 * of a WRMSR instruction. We'll do the rest.
934 *
935 * @retval VINF_SUCCESS on success.
936 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
937 * appropriate actions.
938 *
939 * @param pVCpu Pointer to the VMCPU.
940 * @param idMsr The MSR id.
941 * @param uValue The value to set.
942 *
943 * @remarks Everyone changing MSR values, including the recompiler, shall do it
944 * by calling this method. This makes sure we have current values and
945 * that we trigger all the right actions when something changes.
946 */
947VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
948{
949 /*
950 * If we don't indicate MSR support in the CPUID feature bits, indicate
951 * that a #GP(0) should be raised.
952 */
953 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
954 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
955
956 int rc = VINF_SUCCESS;
957 switch (idMsr)
958 {
959 case MSR_IA32_MISC_ENABLE:
960 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = uValue;
961 break;
962
963 case MSR_IA32_TSC:
964 TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
965 break;
966
967 case MSR_IA32_APICBASE:
968 rc = PDMApicSetBase(pVCpu->CTX_SUFF(pVM), uValue);
969 if (rc != VINF_SUCCESS)
970 rc = VERR_CPUM_RAISE_GP_0;
971 break;
972
973 case MSR_IA32_CR_PAT:
974 pVCpu->cpum.s.Guest.msrPAT = uValue;
975 break;
976
977 case MSR_IA32_SYSENTER_CS:
978 pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16 bits selector */
979 break;
980
981 case MSR_IA32_SYSENTER_EIP:
982 pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
983 break;
984
985 case MSR_IA32_SYSENTER_ESP:
986 pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
987 break;
988
989 case MSR_IA32_MTRR_CAP:
990 return VERR_CPUM_RAISE_GP_0;
991
992 case MSR_IA32_MTRR_DEF_TYPE:
993 if ( (uValue & UINT64_C(0xfffffffffffff300))
994 || ( (uValue & 0xff) != 0
995 && (uValue & 0xff) != 1
996 && (uValue & 0xff) != 4
997 && (uValue & 0xff) != 5
998 && (uValue & 0xff) != 6) )
999 {
1000 Log(("MSR_IA32_MTRR_DEF_TYPE: #GP(0) - writing reserved value (%#llx)\n", uValue));
1001 return VERR_CPUM_RAISE_GP_0;
1002 }
1003 pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue;
1004 break;
1005
1006 case IA32_MTRR_FIX64K_00000:
1007 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000 = uValue;
1008 break;
1009 case IA32_MTRR_FIX16K_80000:
1010 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000 = uValue;
1011 break;
1012 case IA32_MTRR_FIX16K_A0000:
1013 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000 = uValue;
1014 break;
1015 case IA32_MTRR_FIX4K_C0000:
1016 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000 = uValue;
1017 break;
1018 case IA32_MTRR_FIX4K_C8000:
1019 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000 = uValue;
1020 break;
1021 case IA32_MTRR_FIX4K_D0000:
1022 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000 = uValue;
1023 break;
1024 case IA32_MTRR_FIX4K_D8000:
1025 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000 = uValue;
1026 break;
1027 case IA32_MTRR_FIX4K_E0000:
1028 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000 = uValue;
1029 break;
1030 case IA32_MTRR_FIX4K_E8000:
1031 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000 = uValue;
1032 break;
1033 case IA32_MTRR_FIX4K_F0000:
1034 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000 = uValue;
1035 break;
1036 case IA32_MTRR_FIX4K_F8000:
1037 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000 = uValue;
1038 break;
1039
1040 case MSR_K6_EFER:
1041 {
1042 PVM pVM = pVCpu->CTX_SUFF(pVM);
1043 uint64_t const uOldEFER = pVCpu->cpum.s.Guest.msrEFER;
1044 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1045 ? pVM->cpum.s.aGuestCpuIdExt[1].edx
1046 : 0;
1047 uint64_t fMask = 0;
1048
1049 /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
1050 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_NX)
1051 fMask |= MSR_K6_EFER_NXE;
1052 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1053 fMask |= MSR_K6_EFER_LME;
1054 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_SEP)
1055 fMask |= MSR_K6_EFER_SCE;
1056 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
1057 fMask |= MSR_K6_EFER_FFXSR;
1058
1059 /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
1060 paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
1061 if ( (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
1062 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
1063 {
1064 Log(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
1065 return VERR_CPUM_RAISE_GP_0;
1066 }
1067
1068 /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
1069 AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
1070 ("Unexpected value %RX64\n", uValue));
1071 pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);
1072
1073 /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
1074 if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
1075 if ( (uOldEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
1076 != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
1077 {
1078 /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
1079 HWACCMFlushTLB(pVCpu);
1080
1081 /* Notify PGM about NXE changes. */
1082 if ( (uOldEFER & MSR_K6_EFER_NXE)
1083 != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
1084 PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE));
1085 }
1086 break;
1087 }
1088
1089 case MSR_K8_SF_MASK:
1090 pVCpu->cpum.s.Guest.msrSFMASK = uValue;
1091 break;
1092
1093 case MSR_K6_STAR:
1094 pVCpu->cpum.s.Guest.msrSTAR = uValue;
1095 break;
1096
1097 case MSR_K8_LSTAR:
1098 pVCpu->cpum.s.Guest.msrLSTAR = uValue;
1099 break;
1100
1101 case MSR_K8_CSTAR:
1102 pVCpu->cpum.s.Guest.msrCSTAR = uValue;
1103 break;
1104
1105 case MSR_K8_FS_BASE:
1106 pVCpu->cpum.s.Guest.fs.u64Base = uValue;
1107 break;
1108
1109 case MSR_K8_GS_BASE:
1110 pVCpu->cpum.s.Guest.gs.u64Base = uValue;
1111 break;
1112
1113 case MSR_K8_KERNEL_GS_BASE:
1114 pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
1115 break;
1116
1117 case MSR_K8_TSC_AUX:
1118 pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
1119 break;
1120
1121 default:
1122 /* In X2APIC specification this range is reserved for APIC control. */
1123 if ( idMsr >= MSR_IA32_APIC_START
1124 && idMsr < MSR_IA32_APIC_END)
1125 {
1126 rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
1127 if (rc != VINF_SUCCESS)
1128 rc = VERR_CPUM_RAISE_GP_0;
1129 }
1130 else
1131 {
1132 /* We should actually trigger a #GP here, but don't, as that might cause more trouble. */
1133 /** @todo rc = VERR_CPUM_RAISE_GP_0 */
1134 Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
1135 }
1136 break;
1137 }
1138 return rc;
1139}
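
The matching WRMSR-style sketch (again the editor's illustration under the same assumptions; the Example* name is invented):

static int ExampleEmulateWrMsr(PVMCPU pVCpu)
{
    uint32_t const idMsr  = CPUMGetGuestECX(pVCpu);
    uint64_t const uValue = ((uint64_t)CPUMGetGuestEDX(pVCpu) << 32)
                          | CPUMGetGuestEAX(pVCpu);
    int rc = CPUMSetGuestMsr(pVCpu, idMsr, uValue);   /* EFER filtering, TLB flushing etc. happen inside */
    /* VERR_CPUM_RAISE_GP_0 again means the caller should inject #GP(0). */
    return rc;
}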
1140
1141
1142VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
1143{
1144 if (pcbLimit)
1145 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
1146 return pVCpu->cpum.s.Guest.idtr.pIdt;
1147}
1148
1149
1150VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
1151{
1152 if (pHidden)
1153 *pHidden = pVCpu->cpum.s.Guest.tr;
1154 return pVCpu->cpum.s.Guest.tr.Sel;
1155}
1156
1157
1158VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
1159{
1160 return pVCpu->cpum.s.Guest.cs.Sel;
1161}
1162
1163
1164VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
1165{
1166 return pVCpu->cpum.s.Guest.ds.Sel;
1167}
1168
1169
1170VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
1171{
1172 return pVCpu->cpum.s.Guest.es.Sel;
1173}
1174
1175
1176VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
1177{
1178 return pVCpu->cpum.s.Guest.fs.Sel;
1179}
1180
1181
1182VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
1183{
1184 return pVCpu->cpum.s.Guest.gs.Sel;
1185}
1186
1187
1188VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
1189{
1190 return pVCpu->cpum.s.Guest.ss.Sel;
1191}
1192
1193
1194VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
1195{
1196 return pVCpu->cpum.s.Guest.ldtr.Sel;
1197}
1198
1199
1200VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
1201{
1202 return pVCpu->cpum.s.Guest.cr0;
1203}
1204
1205
1206VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
1207{
1208 return pVCpu->cpum.s.Guest.cr2;
1209}
1210
1211
1212VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
1213{
1214 return pVCpu->cpum.s.Guest.cr3;
1215}
1216
1217
1218VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
1219{
1220 return pVCpu->cpum.s.Guest.cr4;
1221}
1222
1223
1224VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
1225{
1226 uint64_t u64;
1227 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1228 if (RT_FAILURE(rc))
1229 u64 = 0;
1230 return u64;
1231}
1232
1233
1234VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
1235{
1236 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1237}
1238
1239
1240VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
1241{
1242 return pVCpu->cpum.s.Guest.eip;
1243}
1244
1245
1246VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1247{
1248 return pVCpu->cpum.s.Guest.rip;
1249}
1250
1251
1252VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1253{
1254 return pVCpu->cpum.s.Guest.eax;
1255}
1256
1257
1258VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1259{
1260 return pVCpu->cpum.s.Guest.ebx;
1261}
1262
1263
1264VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1265{
1266 return pVCpu->cpum.s.Guest.ecx;
1267}
1268
1269
1270VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1271{
1272 return pVCpu->cpum.s.Guest.edx;
1273}
1274
1275
1276VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1277{
1278 return pVCpu->cpum.s.Guest.esi;
1279}
1280
1281
1282VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1283{
1284 return pVCpu->cpum.s.Guest.edi;
1285}
1286
1287
1288VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1289{
1290 return pVCpu->cpum.s.Guest.esp;
1291}
1292
1293
1294VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1295{
1296 return pVCpu->cpum.s.Guest.ebp;
1297}
1298
1299
1300VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1301{
1302 return pVCpu->cpum.s.Guest.eflags.u32;
1303}
1304
1305
1306VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1307{
1308 switch (iReg)
1309 {
1310 case DISCREG_CR0:
1311 *pValue = pVCpu->cpum.s.Guest.cr0;
1312 break;
1313
1314 case DISCREG_CR2:
1315 *pValue = pVCpu->cpum.s.Guest.cr2;
1316 break;
1317
1318 case DISCREG_CR3:
1319 *pValue = pVCpu->cpum.s.Guest.cr3;
1320 break;
1321
1322 case DISCREG_CR4:
1323 *pValue = pVCpu->cpum.s.Guest.cr4;
1324 break;
1325
1326 case DISCREG_CR8:
1327 {
1328 uint8_t u8Tpr;
1329 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /*pfPending*/);
1330 if (RT_FAILURE(rc))
1331 {
1332 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1333 *pValue = 0;
1334 return rc;
1335 }
1336 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes into CR8; bits 3-0 are not represented. */
1337 break;
1338 }
1339
1340 default:
1341 return VERR_INVALID_PARAMETER;
1342 }
1343 return VINF_SUCCESS;
1344}
1345
1346
1347VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1348{
1349 return pVCpu->cpum.s.Guest.dr[0];
1350}
1351
1352
1353VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1354{
1355 return pVCpu->cpum.s.Guest.dr[1];
1356}
1357
1358
1359VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1360{
1361 return pVCpu->cpum.s.Guest.dr[2];
1362}
1363
1364
1365VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1366{
1367 return pVCpu->cpum.s.Guest.dr[3];
1368}
1369
1370
1371VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1372{
1373 return pVCpu->cpum.s.Guest.dr[6];
1374}
1375
1376
1377VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1378{
1379 return pVCpu->cpum.s.Guest.dr[7];
1380}
1381
1382
1383VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1384{
1385 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1386 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1387 if (iReg == 4 || iReg == 5)
1388 iReg += 2;
1389 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1390 return VINF_SUCCESS;
1391}
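
A tiny sketch of the DR4/DR5 aliasing handled above (editor's illustration; the Example* name is assumed):

static uint64_t ExampleReadGuestDr5(PVMCPU pVCpu)
{
    /* DR5 is aliased to DR7 by the accessor above, so this returns the DR7 value. */
    uint64_t uValue;
    int rc = CPUMGetGuestDRx(pVCpu, 5, &uValue);
    AssertRC(rc);
    return uValue;
}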
1392
1393
1394VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1395{
1396 return pVCpu->cpum.s.Guest.msrEFER;
1397}
1398
1399
1400/**
1401 * Gets a CpuId leaf.
1402 *
1403 * @param pVCpu Pointer to the VMCPU.
1404 * @param iLeaf The CPUID leaf to get.
1405 * @param pEax Where to store the EAX value.
1406 * @param pEbx Where to store the EBX value.
1407 * @param pEcx Where to store the ECX value. On input this must hold the sub-leaf index (used for leaf 4).
1408 * @param pEdx Where to store the EDX value.
1409 */
1410VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1411{
1412 PVM pVM = pVCpu->CTX_SUFF(pVM);
1413
1414 PCCPUMCPUID pCpuId;
1415 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1416 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1417 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1418 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1419 else if ( iLeaf - UINT32_C(0x40000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdHyper)
1420 && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP))
1421 pCpuId = &pVM->cpum.s.aGuestCpuIdHyper[iLeaf - UINT32_C(0x40000000)]; /* Only report if HVP bit set. */
1422 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1423 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1424 else
1425 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1426
1427 uint32_t cCurrentCacheIndex = *pEcx;
1428
1429 *pEax = pCpuId->eax;
1430 *pEbx = pCpuId->ebx;
1431 *pEcx = pCpuId->ecx;
1432 *pEdx = pCpuId->edx;
1433
1434 if ( iLeaf == 1)
1435 {
1436 /* Bits 31-24: Initial APIC ID */
1437 Assert(pVCpu->idCpu <= 255);
1438 *pEbx |= (pVCpu->idCpu << 24);
1439 }
1440
1441 if ( iLeaf == 4
1442 && cCurrentCacheIndex < 3
1443 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1444 {
1445 uint32_t type, level, sharing, linesize,
1446 partitions, associativity, sets, cores;
1447
1448 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1449 partitions = 1;
1450 /* These are only here to silence the compiler; they always
1451 get overwritten below, and the compiler should be able to figure that out. */
1452 sets = associativity = sharing = level = 1;
1453 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1454 switch (cCurrentCacheIndex)
1455 {
1456 case 0:
1457 type = 1;
1458 level = 1;
1459 sharing = 1;
1460 linesize = 64;
1461 associativity = 8;
1462 sets = 64;
1463 break;
1464 case 1:
1465 level = 1;
1466 type = 2;
1467 sharing = 1;
1468 linesize = 64;
1469 associativity = 8;
1470 sets = 64;
1471 break;
1472 default: /* shut up gcc.*/
1473 AssertFailed();
1474 case 2:
1475 level = 2;
1476 type = 3;
1477 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1478 linesize = 64;
1479 associativity = 24;
1480 sets = 4096;
1481 break;
1482 }
1483
1484 *pEax |= ((cores - 1) << 26) |
1485 ((sharing - 1) << 14) |
1486 (level << 5) |
1487 1;
1488 *pEbx = (linesize - 1) |
1489 ((partitions - 1) << 12) |
1490 ((associativity - 1) << 22); /* -1 encoding */
1491 *pEcx = sets - 1;
1492 }
1493
1494 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1495}
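
A caller-side CPUID emulation sketch (editor's illustration under the same assumptions; note that ECX carries the sub-leaf index into the function above):

static void ExampleEmulateCpuId(PVMCPU pVCpu)
{
    uint32_t uEax = CPUMGetGuestEAX(pVCpu);   /* leaf */
    uint32_t uEbx = CPUMGetGuestEBX(pVCpu);
    uint32_t uEcx = CPUMGetGuestECX(pVCpu);   /* sub-leaf on input; used for leaf 4 */
    uint32_t uEdx = CPUMGetGuestEDX(pVCpu);
    CPUMGetGuestCpuId(pVCpu, uEax, &uEax, &uEbx, &uEcx, &uEdx);
    CPUMSetGuestEAX(pVCpu, uEax);
    CPUMSetGuestEBX(pVCpu, uEbx);
    CPUMSetGuestECX(pVCpu, uEcx);
    CPUMSetGuestEDX(pVCpu, uEdx);
}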
1496
1497/**
1498 * Gets a number of standard CPUID leafs.
1499 *
1500 * @returns Number of leafs.
1501 * @param pVM Pointer to the VM.
1502 * @remark Intended for PATM.
1503 */
1504VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1505{
1506 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1507}
1508
1509
1510/**
1511 * Gets a number of extended CPUID leafs.
1512 *
1513 * @returns Number of leafs.
1514 * @param pVM Pointer to the VM.
1515 * @remark Intended for PATM.
1516 */
1517VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1518{
1519 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1520}
1521
1522
1523/**
1524 * Gets a number of centaur CPUID leafs.
1525 *
1526 * @returns Number of leafs.
1527 * @param pVM Pointer to the VM.
1528 * @remark Intended for PATM.
1529 */
1530VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1531{
1532 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1533}
1534
1535
1536/**
1537 * Sets a CPUID feature bit.
1538 *
1539 * @param pVM Pointer to the VM.
1540 * @param enmFeature The feature to set.
1541 */
1542VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1543{
1544 switch (enmFeature)
1545 {
1546 /*
1547 * Set the APIC bit in both feature masks.
1548 */
1549 case CPUMCPUIDFEATURE_APIC:
1550 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1551 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1552 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1553 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1554 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1555 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1556 break;
1557
1558 /*
1559 * Set the x2APIC bit in the standard feature mask.
1560 */
1561 case CPUMCPUIDFEATURE_X2APIC:
1562 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1563 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1564 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1565 break;
1566
1567 /*
1568 * Set the sysenter/sysexit bit in the standard feature mask.
1569 * Assumes the caller knows what it's doing! (host must support these)
1570 */
1571 case CPUMCPUIDFEATURE_SEP:
1572 {
1573 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1574 {
1575 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1576 return;
1577 }
1578
1579 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1580 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1581 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1582 break;
1583 }
1584
1585 /*
1586 * Set the syscall/sysret bit in the extended feature mask.
1587 * Assumes the caller knows what it's doing! (host must support these)
1588 */
1589 case CPUMCPUIDFEATURE_SYSCALL:
1590 {
1591 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1592 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1593 {
1594#if HC_ARCH_BITS == 32
1595 /* X86_CPUID_AMD_FEATURE_EDX_SEP does not seem to be set in 32-bit mode,
1596 * even when the CPU is capable of it in 64-bit mode.
1597 */
1598 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1599 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1600 || !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1601#endif
1602 {
1603 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1604 return;
1605 }
1606 }
1607 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1608 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1609 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1610 break;
1611 }
1612
1613 /*
1614 * Set the PAE bit in both feature masks.
1615 * Assumes the caller knows what it's doing! (host must support these)
1616 */
1617 case CPUMCPUIDFEATURE_PAE:
1618 {
1619 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1620 {
1621 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1622 return;
1623 }
1624
1625 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1626 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1627 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1628 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1629 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1630 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1631 break;
1632 }
1633
1634 /*
1635 * Set the LONG MODE bit in the extended feature mask.
1636 * Assumes the caller knows what it's doing! (host must support these)
1637 */
1638 case CPUMCPUIDFEATURE_LONG_MODE:
1639 {
1640 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1641 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1642 {
1643 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1644 return;
1645 }
1646
1647 /* Valid for both Intel and AMD. */
1648 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1649 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1650 break;
1651 }
1652
1653 /*
1654 * Set the NXE bit in the extended feature mask.
1655 * Assumes the caller knows what it's doing! (host must support these)
1656 */
1657 case CPUMCPUIDFEATURE_NXE:
1658 {
1659 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1660 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1661 {
1662 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1663 return;
1664 }
1665
1666 /* Valid for both Intel and AMD. */
1667 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1668 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1669 break;
1670 }
1671
1672 case CPUMCPUIDFEATURE_LAHF:
1673 {
1674 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1675 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1676 {
1677 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1678 return;
1679 }
1680
1681 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1682 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1683 break;
1684 }
1685
1686 case CPUMCPUIDFEATURE_PAT:
1687 {
1688 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1689 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1690 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1691 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1692 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1693 LogRel(("CPUMClearGuestCpuIdFeature: Enabled PAT\n"));
1694 break;
1695 }
1696
1697 case CPUMCPUIDFEATURE_RDTSCP:
1698 {
1699 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1700 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP)
1701 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1702 {
1703 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1704 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1705 return;
1706 }
1707
1708 /* Valid for AMD only (for now). */
1709 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
1710 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1711 break;
1712 }
1713
1714 /*
1715 * Set the Hypervisor Present bit in the standard feature mask.
1716 */
1717 case CPUMCPUIDFEATURE_HVP:
1718 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1719 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP;
1720 LogRel(("CPUMSetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
1721 break;
1722
1723 default:
1724 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1725 break;
1726 }
1727 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1728 {
1729 PVMCPU pVCpu = &pVM->aCpus[i];
1730 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1731 }
1732}
1733
1734
1735/**
1736 * Queries a CPUID feature bit.
1737 *
1738 * @returns boolean for feature presence
1739 * @param pVM Pointer to the VM.
1740 * @param enmFeature The feature to query.
1741 */
1742VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1743{
1744 switch (enmFeature)
1745 {
1746 case CPUMCPUIDFEATURE_PAE:
1747 {
1748 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1749 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1750 break;
1751 }
1752
1753 case CPUMCPUIDFEATURE_NXE:
1754 {
1755 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1756 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_NX);
1757 }
1758
1759 case CPUMCPUIDFEATURE_RDTSCP:
1760 {
1761 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1762 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
1763 break;
1764 }
1765
1766 case CPUMCPUIDFEATURE_LONG_MODE:
1767 {
1768 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1769 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
1770 break;
1771 }
1772
1773 default:
1774 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1775 break;
1776 }
1777 return false;
1778}
1779
1780
1781/**
1782 * Clears a CPUID feature bit.
1783 *
1784 * @param pVM Pointer to the VM.
1785 * @param enmFeature The feature to clear.
1786 */
1787VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1788{
1789 switch (enmFeature)
1790 {
1791 /*
1792 * Set the APIC bit in both feature masks.
1793 */
1794 case CPUMCPUIDFEATURE_APIC:
1795 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1796 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1797 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1798 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1799 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1800 Log(("CPUMSetGuestCpuIdFeature: Disabled APIC\n"));
1801 break;
1802
1803 /*
1804 * Clear the x2APIC bit in the standard feature mask.
1805 */
1806 case CPUMCPUIDFEATURE_X2APIC:
1807 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1808 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1809 LogRel(("CPUMSetGuestCpuIdFeature: Disabled x2APIC\n"));
1810 break;
1811
1812 case CPUMCPUIDFEATURE_PAE:
1813 {
1814 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1815 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1816 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1817 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1818 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1819 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1820 break;
1821 }
1822
1823 case CPUMCPUIDFEATURE_PAT:
1824 {
1825 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1826 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1827 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1828 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1829 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1830 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1831 break;
1832 }
1833
1834 case CPUMCPUIDFEATURE_LONG_MODE:
1835 {
1836 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1837 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1838 break;
1839 }
1840
1841 case CPUMCPUIDFEATURE_LAHF:
1842 {
1843 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1844 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1845 break;
1846 }
1847
1848 case CPUMCPUIDFEATURE_HVP:
1849 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1850 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP;
1851 break;
1852
1853 default:
1854 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1855 break;
1856 }
1857 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1858 {
1859 PVMCPU pVCpu = &pVM->aCpus[i];
1860 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1861 }
1862}
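
A short sketch combining the set/query/clear CPUID feature helpers above (editor's illustration; the Example* name and the particular feature pairing are purely illustrative, not prescribed by this file):

static void ExampleConfigureCpuIdFeatures(PVM pVM)
{
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);        /* expose the APIC bit */
    if (!CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE))
        CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);   /* hide NX for a 32-bit-only guest */
}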
1863
1864
1865/**
1866 * Gets the host CPU vendor
1867 *
1868 * @returns CPU vendor
1869 * @param pVM Pointer to the VM.
1870 */
1871VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1872{
1873 return pVM->cpum.s.enmHostCpuVendor;
1874}
1875
1876/**
1877 * Gets the CPU vendor
1878 *
1879 * @returns CPU vendor
1880 * @param pVM Pointer to the VM.
1881 */
1882VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1883{
1884 return pVM->cpum.s.enmGuestCpuVendor;
1885}
1886
1887
1888VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1889{
1890 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1891 return CPUMRecalcHyperDRx(pVCpu);
1892}
1893
1894
1895VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1896{
1897 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1898 return CPUMRecalcHyperDRx(pVCpu);
1899}
1900
1901
1902VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1903{
1904 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1905 return CPUMRecalcHyperDRx(pVCpu);
1906}
1907
1908
1909VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1910{
1911 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1912 return CPUMRecalcHyperDRx(pVCpu);
1913}
1914
1915
1916VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1917{
1918 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1919 return CPUMRecalcHyperDRx(pVCpu);
1920}
1921
1922
1923VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1924{
1925 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1926 return CPUMRecalcHyperDRx(pVCpu);
1927}
1928
1929
1930VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1931{
1932 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1933 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1934 if (iReg == 4 || iReg == 5)
1935 iReg += 2;
1936 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1937 return CPUMRecalcHyperDRx(pVCpu);
1938}
1939
1940
1941/**
1942 * Recalculates the hypervisor DRx register values based on
1943 * current guest registers and DBGF breakpoints.
1944 *
1945 * This is called whenever a guest DRx register is modified and when DBGF
1946 * sets a hardware breakpoint. In guest context this function will reload
1947 * any (hyper) DRx registers which comes out with a different value.
1948 *
1949 * @returns VINF_SUCCESS.
1950 * @param pVCpu Pointer to the VMCPU.
1951 */
1952VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
1953{
1954 PVM pVM = pVCpu->CTX_SUFF(pVM);
1955
1956 /*
1957 * Compare the DR7s first.
1958 *
1959 * We only care about the enabled flags. The GE and LE flags are always
1960 * set and we don't care if the guest doesn't set them. GD is virtualized
1961 * when we dispatch #DB; we never enable it.
1962 */
1963 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1964#ifdef CPUM_VIRTUALIZE_DRX
1965 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1966#else
1967 const RTGCUINTREG uGstDr7 = 0;
1968#endif
1969 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1970 {
1971 /*
1972 * OK, something is enabled. Recalculate each of the breakpoints.
1973 * Straightforward code, not optimized/minimized in any way.
1974 */
1975 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1976
1977 /* bp 0 */
1978 RTGCUINTREG uNewDr0;
1979 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1980 {
1981 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1982 uNewDr0 = DBGFBpGetDR0(pVM);
1983 }
1984 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1985 {
1986 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1987 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1988 }
1989 else
1990 uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
1991
1992 /* bp 1 */
1993 RTGCUINTREG uNewDr1;
1994 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1995 {
1996 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1997 uNewDr1 = DBGFBpGetDR1(pVM);
1998 }
1999 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
2000 {
2001 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2002 uNewDr1 = CPUMGetGuestDR1(pVCpu);
2003 }
2004 else
2005 uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
2006
2007 /* bp 2 */
2008 RTGCUINTREG uNewDr2;
2009 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
2010 {
2011 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2012 uNewDr2 = DBGFBpGetDR2(pVM);
2013 }
2014 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
2015 {
2016 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2017 uNewDr2 = CPUMGetGuestDR2(pVCpu);
2018 }
2019 else
2020 uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
2021
2022 /* bp 3 */
2023 RTGCUINTREG uNewDr3;
2024 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2025 {
2026 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2027 uNewDr3 = DBGFBpGetDR3(pVM);
2028 }
2029 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2030 {
2031 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2032 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2033 }
2034 else
2035 uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
2036
2037 /*
2038 * Apply the updates.
2039 */
2040#ifdef IN_RC
2041 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
2042 {
2043 /** @todo save host DBx registers. */
2044 }
2045#endif
2046 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
2047 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2048 CPUMSetHyperDR3(pVCpu, uNewDr3);
2049 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2050 CPUMSetHyperDR2(pVCpu, uNewDr2);
2051 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2052 CPUMSetHyperDR1(pVCpu, uNewDr1);
2053 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2054 CPUMSetHyperDR0(pVCpu, uNewDr0);
2055 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2056 CPUMSetHyperDR7(pVCpu, uNewDr7);
2057 }
2058 else
2059 {
2060#ifdef IN_RC
2061 if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
2062 {
2063 /** @todo restore host DBx registers. */
2064 }
2065#endif
2066 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2067 }
2068 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2069 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2070 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2071 pVCpu->cpum.s.Hyper.dr[7]));
2072
2073 return VINF_SUCCESS;
2074}
2075
2076
2077/**
2078 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2079 *
2080 * @returns true if NXE is enabled, otherwise false.
2081 * @param pVCpu Pointer to the VMCPU.
2082 */
2083VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2084{
2085 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2086}
2087
2088
2089/**
2090 * Tests if the guest has the Page Size Extension enabled (PSE).
2091 *
2092 * @returns true if PSE is enabled, otherwise false.
2093 * @param pVCpu Pointer to the VMCPU.
2094 */
2095VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2096{
2097 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2098 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2099}
2100
2101
2102/**
2103 * Tests if the guest has paging enabled (CR0.PG).
2104 *
2105 * @returns true if paging is enabled, otherwise false.
2106 * @param pVCpu Pointer to the VMCPU.
2107 */
2108VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2109{
2110 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2111}
2112
2113
2114/**
2115 * Tests if the guest has write protection of ring-0 (supervisor) accesses enabled (CR0.WP).
2116 *
2117 * @returns true if CR0.WP is set, otherwise false.
2118 * @param pVCpu Pointer to the VMCPU.
2119 */
2120VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2121{
2122 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2123}
2124
2125
2126/**
2127 * Tests if the guest is running in real mode or not.
2128 *
2129 * @returns true if in real mode, otherwise false.
2130 * @param pVCpu Pointer to the VMCPU.
2131 */
2132VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2133{
2134 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2135}
2136
2137
2138/**
2139 * Tests if the guest is running in real or virtual 8086 mode.
2140 *
2141 * @returns @c true if it is, @c false if not.
2142 * @param pVCpu Pointer to the VMCPU.
2143 */
2144VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2145{
2146 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2147 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2148}
2149
2150
2151/**
2152 * Tests if the guest is running in protected mode or not.
2153 *
2154 * @returns true if in protected mode, otherwise false.
2155 * @param pVCpu Pointer to the VMCPU.
2156 */
2157VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2158{
2159 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2160}
2161
2162
2163/**
2164 * Tests if the guest is running in paged protected mode or not.
2165 *
2166 * @returns true if in paged protected mode, otherwise false.
2167 * @param pVCpu Pointer to the VMCPU.
2168 */
2169VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2170{
2171 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2172}
2173
2174
2175/**
2176 * Tests if the guest is running in long mode or not.
2177 *
2178 * @returns true if in long mode, otherwise false.
2179 * @param pVCpu Pointer to the VMCPU.
2180 */
2181VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2182{
2183 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2184}
2185
2186
2187/**
2188 * Tests if the guest is running in PAE mode or not.
2189 *
2190 * @returns true if in PAE mode, otherwise false.
2191 * @param pVCpu Pointer to the VMCPU.
2192 */
2193VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2194{
2195 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2196 && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
2197 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2198}
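
/*
 * Illustrative sketch (hypothetical helper, excluded from the build): how the
 * mode predicates above compose.  The order matters, since long mode implies
 * PAE and both imply paged protected mode.
 */
#if 0 /* example only */
static const char *cpumExampleDescribePagingMode(PVMCPU pVCpu)
{
    if (CPUMIsGuestInLongMode(pVCpu))
        return "long mode (PAE paging, LMA set)";
    if (CPUMIsGuestInPAEMode(pVCpu))
        return "PAE paging";
    if (CPUMIsGuestInPagedProtectedMode(pVCpu))
        return "32-bit paging";
    if (CPUMIsGuestInProtectedMode(pVCpu))
        return "protected mode without paging";
    return "real mode";
}
#endif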
2199
2200
2201#ifndef IN_RING0
2202/**
2203 * Updates the EFLAGS while we're in raw-mode.
2204 *
2205 * @param pVCpu Pointer to the VMCPU.
2206 * @param pCtxCore The context core.
2207 * @param eflags The new EFLAGS value.
2208 */
2209VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t eflags)
2210{
2211 PVM pVM = pVCpu->CTX_SUFF(pVM);
2212
2213 if (!pVCpu->cpum.s.fRawEntered)
2214 {
2215 pCtxCore->eflags.u32 = eflags;
2216 return;
2217 }
2218 PATMRawSetEFlags(pVM, pCtxCore, eflags);
2219}
2220#endif /* !IN_RING0 */
2221
2222
2223/**
2224 * Gets the EFLAGS while we're in raw-mode.
2225 *
2226 * @returns The eflags.
2227 * @param pVCpu Pointer to the VMCPU.
2228 * @param pCtxCore The context core.
2229 */
2230VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2231{
2232#ifdef IN_RING0
2233 NOREF(pVCpu);
2234 return pCtxCore->eflags.u32;
2235#else
2236 PVM pVM = pVCpu->CTX_SUFF(pVM);
2237
2238 if (!pVCpu->cpum.s.fRawEntered)
2239 return pCtxCore->eflags.u32;
2240 return PATMRawGetEFlags(pVM, pCtxCore);
2241#endif
2242}
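
/*
 * Illustrative sketch (hypothetical helper, excluded from the build): a
 * read-modify-write of the guest EFLAGS should go through the raw-mode
 * accessors so that a PATM-virtualized interrupt flag is honoured.
 * CPUMRawSetEFlags is unavailable in ring-0, hence the guard.
 */
#if 0 /* example only */
# ifndef IN_RING0
static void cpumExampleSetGuestIF(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    uint32_t fEfl = CPUMRawGetEFlags(pVCpu, pCtxCore);      /* PATM-aware read. */
    CPUMRawSetEFlags(pVCpu, pCtxCore, fEfl | X86_EFL_IF);   /* PATM-aware write. */
}
# endif
#endif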
2243
2244
2245/**
2246 * Sets the specified changed flags (CPUM_CHANGED_*).
2247 *
2248 * @param pVCpu Pointer to the VMCPU.
 * @param fChangedFlags The changed flags to set (CPUM_CHANGED_*).
2249 */
2250VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2251{
2252 pVCpu->cpum.s.fChanged |= fChangedFlags;
2253}
2254
2255
2256/**
2257 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
2258 * @returns true if supported.
2259 * @returns false if not supported.
2260 * @param pVM Pointer to the VM.
2261 */
2262VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2263{
2264 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2265}
2266
2267
2268/**
2269 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2270 * @returns true if used.
2271 * @returns false if not used.
2272 * @param pVM Pointer to the VM.
2273 */
2274VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2275{
2276 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
2277}
2278
2279
2280/**
2281 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2282 * @returns true if used.
2283 * @returns false if not used.
2284 * @param pVM Pointer to the VM.
2285 */
2286VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2287{
2288 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
2289}
2290
2291#ifndef IN_RING3
2292
2293/**
2294 * Lazily sync in the FPU/XMM state.
2295 *
2296 * @returns VBox status code.
2297 * @param pVCpu Pointer to the VMCPU.
2298 */
2299VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2300{
2301 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2302}
2303
2304#endif /* !IN_RING3 */
2305
2306/**
2307 * Checks if we activated the FPU/XMM state of the guest OS
2308 * @returns true if we did.
2309 * @returns false if not.
2310 * @param pVCpu Pointer to the VMCPU.
2311 */
2312VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2313{
2314 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2315}
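
/*
 * Illustrative sketch (hypothetical handler, excluded from the build): a
 * typical device-not-available (#NM) path loads the guest FPU/XMM state
 * lazily the first time the guest touches it.  The status code chosen for
 * the forwarding case is only an example; CPUMHandleLazyFPU is only declared
 * outside ring-3, hence the guard.
 */
#if 0 /* example only */
# ifndef IN_RING3
static int cpumExampleOnDeviceNotAvailable(PVMCPU pVCpu)
{
    if (CPUMIsGuestFPUStateActive(pVCpu))
        return VINF_EM_RAW_GUEST_TRAP;  /* State already loaded: the #NM belongs to the guest. */
    return CPUMHandleLazyFPU(pVCpu);    /* First use since the last world switch: load it now. */
}
# endif
#endif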
2316
2317
2318/**
2319 * Deactivate the FPU/XMM state of the guest OS
2320 * @param pVCpu Pointer to the VMCPU.
2321 */
2322VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2323{
2324 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2325}
2326
2327
2328/**
2329 * Checks if the guest debug state is active.
2330 *
2331 * @returns true if active, false if not.
2332 * @param pVCpu Pointer to the VMCPU.
2333 */
2334VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2335{
2336 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2337}
2338
2339/**
2340 * Checks if the hyper debug state is active.
2341 *
2342 * @returns true if active, false if not.
2343 * @param pVCpu Pointer to the VMCPU.
2344 */
2345VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2346{
2347 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
2348}
2349
2350
2351/**
2352 * Mark the guest's debug state as inactive.
2353 *
2354 * @param pVCpu Pointer to the VMCPU.
2356 */
2357VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2358{
2359 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2360}
2361
2362
2363/**
2364 * Mark the hypervisor's debug state as inactive.
2365 *
2366 * @param pVCpu Pointer to the VMCPU.
2368 */
2369VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
2370{
2371 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2372}
2373
2374/**
2375 * Checks if the hidden selector registers are valid for the specified CPU.
2376 *
2377 * @returns true if they are.
2378 * @returns false if not.
2379 * @param pVCpu Pointer to the VMCPU.
2380 */
2381VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVMCPU pVCpu)
2382{
2383 bool const fRc = !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID);
2384 Assert(fRc || !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)));
2385 Assert(!pVCpu->cpum.s.fRemEntered);
2386 return fRc;
2387}
2388
2389
2390
2391/**
2392 * Get the current privilege level of the guest.
2393 *
2394 * @returns The CPL (current privilege level).
2395 * @param pVCpu Pointer to the VMCPU.
2396 * @param pCtxCore The context core (providing SS, CS and EFLAGS).
2397 */
2398VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2399{
2400 uint32_t cpl;
2401
2402 if (CPUMAreHiddenSelRegsValid(pVCpu))
2403 {
2404 /*
2405 * The hidden CS.DPL register is always equal to the CPL; it is
2406 * not affected by loading a conforming code segment.
2407 *
2408 * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
2409 * at SS. (ACP2 regression during install after a far call to ring 2)
2410 *
2411 * It seems this isn't necessarily true for newer AMD-V CPUs either; we
2412 * have to move the VMCB.guest.u8CPL into Attr.n.u2Dpl to make this (and
2413 * other) code work right. So, forget CS.DPL and always use SS.DPL.
2414 */
2415 if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2416 {
2417 if (!pCtxCore->eflags.Bits.u1VM)
2418 cpl = pCtxCore->ss.Attr.n.u2Dpl;
2419 else
2420 cpl = 3; /* REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2421 }
2422 else
2423 cpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2424 }
2425 else if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2426 {
2427 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
2428 {
2429 /*
2430 * The SS RPL is always equal to the CPL, while the CS RPL
2431 * isn't necessarily equal if the segment is conforming.
2432 * See section 4.11.1 in the AMD manual.
2433 */
2434 cpl = (pCtxCore->ss.Sel & X86_SEL_RPL);
2435#ifndef IN_RING0
2436 if (cpl == 1) /* Raw-mode executes guest ring-0 code in ring 1; map it back to ring 0. */
2437 cpl = 0;
2438#endif
2439 }
2440 else
2441 cpl = 3;
2442 }
2443 else
2444 cpl = 0; /* real mode; cpl is zero */
2445
2446 return cpl;
2447}
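
/*
 * Illustrative sketch (hypothetical helper, excluded from the build): a
 * caller that must refuse a privileged operation unless the guest runs in
 * supervisor mode could use CPUMGetGuestCPL like this.
 */
#if 0 /* example only */
static bool cpumExampleIsGuestSupervisor(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    /* CPL 0 is supervisor; V8086 code always reports CPL 3. */
    return CPUMGetGuestCPL(pVCpu, pCtxCore) == 0;
}
#endif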
2448
2449
2450/**
2451 * Gets the current guest CPU mode.
2452 *
2453 * If paging mode is what you need, check out PGMGetGuestMode().
2454 *
2455 * @returns The CPU mode.
2456 * @param pVCpu Pointer to the VMCPU.
2457 */
2458VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2459{
2460 CPUMMODE enmMode;
2461 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2462 enmMode = CPUMMODE_REAL;
2463 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2464 enmMode = CPUMMODE_PROTECTED;
2465 else
2466 enmMode = CPUMMODE_LONG;
2467
2468 return enmMode;
2469}
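
/*
 * Illustrative sketch (hypothetical helper, excluded from the build):
 * dispatching on the coarse mode returned by CPUMGetGuestMode.
 */
#if 0 /* example only */
static const char *cpumExampleModeName(PVMCPU pVCpu)
{
    switch (CPUMGetGuestMode(pVCpu))
    {
        case CPUMMODE_REAL:      return "real";
        case CPUMMODE_PROTECTED: return "protected";
        case CPUMMODE_LONG:      return "long";
        default:                 return "unknown";
    }
}
#endif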