source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@42107

Last change on this file since 42107 was 42034, checked in by vboxsync, 13 years ago: Doxygen.
1/* $Id: CPUMAllRegs.cpp 42034 2012-07-06 03:56:06Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include "CPUMInternal.h"
30#include <VBox/vmm/vm.h>
31#include <VBox/err.h>
32#include <VBox/dis.h>
33#include <VBox/log.h>
34#include <VBox/vmm/hwaccm.h>
35#include <VBox/vmm/tm.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-amd64-x86.h>
39#ifdef IN_RING3
40#include <iprt/thread.h>
41#endif
42
43/** Disable stack frame pointer generation here. */
44#if defined(_MSC_VER) && !defined(DEBUG)
45# pragma optimize("y", off)
46#endif
47
48
49/**
50 * Obsolete.
51 *
52 * We don't support nested hypervisor context interrupts or traps. Life is much
53 * simpler when we don't. It's also slightly faster at times.
54 *
55 * @param pVCpu Pointer to the VMCPU.
56 */
57VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
58{
59 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
60}
61
62
63/**
64 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
65 *
66 * @param pVCpu Pointer to the VMCPU.
67 */
68VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
69{
70 return &pVCpu->cpum.s.Hyper;
71}
72
73
74VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
75{
76 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
77 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
78}
79
80
81VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
82{
83 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
84 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
85}
86
87
88VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
89{
90 pVCpu->cpum.s.Hyper.cr3 = cr3;
91
92#ifdef IN_RC
93 /* Update the current CR3. */
94 ASMSetCR3(cr3);
95#endif
96}
97
98VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
99{
100 return pVCpu->cpum.s.Hyper.cr3;
101}
102
103
104VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
105{
106 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
107}
108
109
110VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
111{
112 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
113}
114
115
116VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
117{
118 pVCpu->cpum.s.Hyper.es.Sel = SelES;
119}
120
121
122VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
123{
124 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
125}
126
127
128VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
129{
130 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
131}
132
133
134VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
135{
136 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
137}
138
139
140VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
141{
142 pVCpu->cpum.s.Hyper.esp = u32ESP;
143}
144
145
146VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
147{
148 pVCpu->cpum.s.Hyper.edx = u32EDX;
149}
150
151
152VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
153{
154 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
155 return VINF_SUCCESS;
156}
157
158
159VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
160{
161 pVCpu->cpum.s.Hyper.eip = u32EIP;
162}
163
164
165/**
166 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
167 * EFLAGS and EIP prior to resuming guest execution.
168 *
169 * All general registers not given as parameters will be set to 0. The EFLAGS
170 * register will be set to sane values for C/C++ code execution with interrupts
171 * disabled and IOPL 0.
172 *
173 * @param pVCpu The current virtual CPU.
174 * @param u32EIP The EIP value.
175 * @param u32ESP The ESP value.
176 * @param u32EAX The EAX value.
177 * @param u32EDX The EDX value.
178 */
179VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
180{
181 pVCpu->cpum.s.Hyper.eip = u32EIP;
182 pVCpu->cpum.s.Hyper.esp = u32ESP;
183 pVCpu->cpum.s.Hyper.eax = u32EAX;
184 pVCpu->cpum.s.Hyper.edx = u32EDX;
185 pVCpu->cpum.s.Hyper.ecx = 0;
186 pVCpu->cpum.s.Hyper.ebx = 0;
187 pVCpu->cpum.s.Hyper.ebp = 0;
188 pVCpu->cpum.s.Hyper.esi = 0;
189 pVCpu->cpum.s.Hyper.edi = 0;
190 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
191}
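/*
 * Illustrative sketch (not part of the original source): how a caller might
 * combine the setters above to prepare the hypervisor context before
 * resuming raw-mode execution.  The selector and address values are made up
 * for illustration only; real ones come from the raw-mode GDT/stack setup.
 */
#if 0 /* example only */
static void cpumExampleSetupHyperCtx(PVMCPU pVCpu)
{
    /* Reset GPRs, EIP/ESP and EFLAGS to a sane baseline (interrupts off, IOPL 0). */
    CPUMSetHyperState(pVCpu, /*u32EIP=*/ 0x1000, /*u32ESP=*/ 0x8000, /*u32EAX=*/ 0, /*u32EDX=*/ 0);
    /* Hypothetical selector values. */
    CPUMSetHyperCS(pVCpu, 0x08);
    CPUMSetHyperSS(pVCpu, 0x10);
}
#endif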
192
193
194VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
195{
196 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
197}
198
199
200VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
201{
202 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
203}
204
205
206VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
207{
208 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
209 /** @todo in GC we must load it! */
210}
211
212
213VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
214{
215 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
216 /** @todo in GC we must load it! */
217}
218
219
220VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
221{
222 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
223 /** @todo in GC we must load it! */
224}
225
226
227VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
228{
229 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
230 /** @todo in GC we must load it! */
231}
232
233
234VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
235{
236 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
237 /** @todo in GC we must load it! */
238}
239
240
241VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
242{
243 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
244 /** @todo in GC we must load it! */
245}
246
247
248VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
249{
250 return pVCpu->cpum.s.Hyper.cs.Sel;
251}
252
253
254VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
255{
256 return pVCpu->cpum.s.Hyper.ds.Sel;
257}
258
259
260VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
261{
262 return pVCpu->cpum.s.Hyper.es.Sel;
263}
264
265
266VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
267{
268 return pVCpu->cpum.s.Hyper.fs.Sel;
269}
270
271
272VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
273{
274 return pVCpu->cpum.s.Hyper.gs.Sel;
275}
276
277
278VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
279{
280 return pVCpu->cpum.s.Hyper.ss.Sel;
281}
282
283
284VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
285{
286 return pVCpu->cpum.s.Hyper.eax;
287}
288
289
290VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
291{
292 return pVCpu->cpum.s.Hyper.ebx;
293}
294
295
296VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
297{
298 return pVCpu->cpum.s.Hyper.ecx;
299}
300
301
302VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
303{
304 return pVCpu->cpum.s.Hyper.edx;
305}
306
307
308VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
309{
310 return pVCpu->cpum.s.Hyper.esi;
311}
312
313
314VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
315{
316 return pVCpu->cpum.s.Hyper.edi;
317}
318
319
320VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
321{
322 return pVCpu->cpum.s.Hyper.ebp;
323}
324
325
326VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
327{
328 return pVCpu->cpum.s.Hyper.esp;
329}
330
331
332VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
333{
334 return pVCpu->cpum.s.Hyper.eflags.u32;
335}
336
337
338VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
339{
340 return pVCpu->cpum.s.Hyper.eip;
341}
342
343
344VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
345{
346 return pVCpu->cpum.s.Hyper.rip;
347}
348
349
350VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
351{
352 if (pcbLimit)
353 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
354 return pVCpu->cpum.s.Hyper.idtr.pIdt;
355}
356
357
358VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
359{
360 if (pcbLimit)
361 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
362 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
363}
364
365
366VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
367{
368 return pVCpu->cpum.s.Hyper.ldtr.Sel;
369}
370
371
372VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
373{
374 return pVCpu->cpum.s.Hyper.dr[0];
375}
376
377
378VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
379{
380 return pVCpu->cpum.s.Hyper.dr[1];
381}
382
383
384VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
385{
386 return pVCpu->cpum.s.Hyper.dr[2];
387}
388
389
390VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
391{
392 return pVCpu->cpum.s.Hyper.dr[3];
393}
394
395
396VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
397{
398 return pVCpu->cpum.s.Hyper.dr[6];
399}
400
401
402VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
403{
404 return pVCpu->cpum.s.Hyper.dr[7];
405}
406
407
408/**
409 * Gets the pointer to the internal CPUMCTXCORE structure.
410 * This is only for reading in order to save a few calls.
411 *
412 * @param pVCpu Handle to the virtual cpu.
413 */
414VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
415{
416 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
417}
418
419
420/**
421 * Queries the pointer to the internal CPUMCTX structure.
422 *
423 * @returns The CPUMCTX pointer.
424 * @param pVCpu Handle to the virtual cpu.
425 */
426VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
427{
428 return &pVCpu->cpum.s.Guest;
429}
430
431VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
432{
433 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
434 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
435 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
436 return VINF_SUCCESS;
437}
438
439VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
440{
441 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
442 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
443 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
444 return VINF_SUCCESS;
445}
446
447VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
448{
449 pVCpu->cpum.s.Guest.tr.Sel = tr;
450 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
451 return VINF_SUCCESS;
452}
453
454VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
455{
456 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
457 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
458 return VINF_SUCCESS;
459}
460
461
462/**
463 * Set the guest CR0.
464 *
465 * When called in GC, the hyper CR0 may be updated if that is
466 * required. The caller only has to take special action if AM,
467 * WP, PG or PE changes.
468 *
469 * @returns VINF_SUCCESS (consider it void).
470 * @param pVCpu Handle to the virtual cpu.
471 * @param cr0 The new CR0 value.
472 */
473VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
474{
475#ifdef IN_RC
476 /*
477 * Check if we need to change hypervisor CR0 because
478 * of math stuff.
479 */
480 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
481 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
482 {
483 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
484 {
485 /*
486 * We haven't saved the host FPU state yet, so TS and MT are both set
487 * and EM should be reflecting the guest EM (it always does this).
488 */
489 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
490 {
491 uint32_t HyperCR0 = ASMGetCR0();
492 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
493 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
494 HyperCR0 &= ~X86_CR0_EM;
495 HyperCR0 |= cr0 & X86_CR0_EM;
496 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
497 ASMSetCR0(HyperCR0);
498 }
499# ifdef VBOX_STRICT
500 else
501 {
502 uint32_t HyperCR0 = ASMGetCR0();
503 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
504 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
505 }
506# endif
507 }
508 else
509 {
510 /*
511 * Already saved the state, so we're just mirroring
512 * the guest flags.
513 */
514 uint32_t HyperCR0 = ASMGetCR0();
515 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
516 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
517 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
518 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
519 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
520 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
521 ASMSetCR0(HyperCR0);
522 }
523 }
524#endif /* IN_RC */
525
526 /*
527 * Check for changes causing TLB flushes (for REM).
528 * The caller is responsible for calling PGM when appropriate.
529 */
530 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
531 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
532 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
533 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
534
535 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
536 return VINF_SUCCESS;
537}
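/*
 * Illustrative sketch (not part of the original source): CPUMSetGuestCR0
 * forces CR0.ET and records TLB-relevant changes in fChanged, so a caller
 * toggling CR0.PG would afterwards see CPUM_CHANGED_GLOBAL_TLB_FLUSH set.
 * The helper name is hypothetical.
 */
#if 0 /* example only */
static void cpumExampleEnablePaging(PVMCPU pVCpu)
{
    uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
    CPUMSetGuestCR0(pVCpu, cr0 | X86_CR0_PG | X86_CR0_PE);
    /* The caller is still responsible for telling PGM about the mode change. */
}
#endif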
538
539
540VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
541{
542 pVCpu->cpum.s.Guest.cr2 = cr2;
543 return VINF_SUCCESS;
544}
545
546
547VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
548{
549 pVCpu->cpum.s.Guest.cr3 = cr3;
550 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
551 return VINF_SUCCESS;
552}
553
554
555VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
556{
557 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
558 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
559 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
560 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
561 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
562 cr4 &= ~X86_CR4_OSFSXR;
563 pVCpu->cpum.s.Guest.cr4 = cr4;
564 return VINF_SUCCESS;
565}
566
567
568VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
569{
570 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
571 return VINF_SUCCESS;
572}
573
574
575VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
576{
577 pVCpu->cpum.s.Guest.eip = eip;
578 return VINF_SUCCESS;
579}
580
581
582VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
583{
584 pVCpu->cpum.s.Guest.eax = eax;
585 return VINF_SUCCESS;
586}
587
588
589VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
590{
591 pVCpu->cpum.s.Guest.ebx = ebx;
592 return VINF_SUCCESS;
593}
594
595
596VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
597{
598 pVCpu->cpum.s.Guest.ecx = ecx;
599 return VINF_SUCCESS;
600}
601
602
603VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
604{
605 pVCpu->cpum.s.Guest.edx = edx;
606 return VINF_SUCCESS;
607}
608
609
610VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
611{
612 pVCpu->cpum.s.Guest.esp = esp;
613 return VINF_SUCCESS;
614}
615
616
617VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
618{
619 pVCpu->cpum.s.Guest.ebp = ebp;
620 return VINF_SUCCESS;
621}
622
623
624VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
625{
626 pVCpu->cpum.s.Guest.esi = esi;
627 return VINF_SUCCESS;
628}
629
630
631VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
632{
633 pVCpu->cpum.s.Guest.edi = edi;
634 return VINF_SUCCESS;
635}
636
637
638VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
639{
640 pVCpu->cpum.s.Guest.ss.Sel = ss;
641 return VINF_SUCCESS;
642}
643
644
645VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
646{
647 pVCpu->cpum.s.Guest.cs.Sel = cs;
648 return VINF_SUCCESS;
649}
650
651
652VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
653{
654 pVCpu->cpum.s.Guest.ds.Sel = ds;
655 return VINF_SUCCESS;
656}
657
658
659VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
660{
661 pVCpu->cpum.s.Guest.es.Sel = es;
662 return VINF_SUCCESS;
663}
664
665
666VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
667{
668 pVCpu->cpum.s.Guest.fs.Sel = fs;
669 return VINF_SUCCESS;
670}
671
672
673VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
674{
675 pVCpu->cpum.s.Guest.gs.Sel = gs;
676 return VINF_SUCCESS;
677}
678
679
680VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
681{
682 pVCpu->cpum.s.Guest.msrEFER = val;
683}
684
685
686/**
687 * Query an MSR.
688 *
689 * The caller is responsible for checking privilege if the call is the result
690 * of a RDMSR instruction. We'll do the rest.
691 *
692 * @retval VINF_SUCCESS on success.
693 * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
694 * expected to take the appropriate actions. @a *puValue is set to 0.
695 * @param pVCpu Pointer to the VMCPU.
696 * @param idMsr The MSR.
697 * @param puValue Where to return the value.
698 *
699 * @remarks This will always return the right values, even when we're in the
700 * recompiler.
701 */
702VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
703{
704 /*
705 * If we don't indicate MSR support in the CPUID feature bits, indicate
706 * that a #GP(0) should be raised.
707 */
708 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
709 {
710 *puValue = 0;
711 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
712 }
713
714 int rc = VINF_SUCCESS;
715 uint8_t const u8Multiplier = 4;
716 switch (idMsr)
717 {
718 case MSR_IA32_TSC:
719 *puValue = TMCpuTickGet(pVCpu);
720 break;
721
722 case MSR_IA32_APICBASE:
723 rc = PDMApicGetBase(pVCpu->CTX_SUFF(pVM), puValue);
724 if (RT_SUCCESS(rc))
725 rc = VINF_SUCCESS;
726 else
727 {
728 *puValue = 0;
729 rc = VERR_CPUM_RAISE_GP_0;
730 }
731 break;
732
733 case MSR_IA32_CR_PAT:
734 *puValue = pVCpu->cpum.s.Guest.msrPAT;
735 break;
736
737 case MSR_IA32_SYSENTER_CS:
738 *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
739 break;
740
741 case MSR_IA32_SYSENTER_EIP:
742 *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
743 break;
744
745 case MSR_IA32_SYSENTER_ESP:
746 *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
747 break;
748
749 case MSR_IA32_MTRR_CAP:
750 {
751 /* This is currently a bit weird. :-) */
752 uint8_t const cVariableRangeRegs = 0;
753 bool const fSystemManagementRangeRegisters = false;
754 bool const fFixedRangeRegisters = false;
755 bool const fWriteCombiningType = false;
756 *puValue = cVariableRangeRegs
757 | (fFixedRangeRegisters ? RT_BIT_64(8) : 0)
758 | (fWriteCombiningType ? RT_BIT_64(10) : 0)
759 | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
760 break;
761 }
762
763 case MSR_IA32_MTRR_DEF_TYPE:
764 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
765 break;
766
767 case IA32_MTRR_FIX64K_00000:
768 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000;
769 break;
770 case IA32_MTRR_FIX16K_80000:
771 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000;
772 break;
773 case IA32_MTRR_FIX16K_A0000:
774 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000;
775 break;
776 case IA32_MTRR_FIX4K_C0000:
777 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000;
778 break;
779 case IA32_MTRR_FIX4K_C8000:
780 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000;
781 break;
782 case IA32_MTRR_FIX4K_D0000:
783 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000;
784 break;
785 case IA32_MTRR_FIX4K_D8000:
786 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000;
787 break;
788 case IA32_MTRR_FIX4K_E0000:
789 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000;
790 break;
791 case IA32_MTRR_FIX4K_E8000:
792 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000;
793 break;
794 case IA32_MTRR_FIX4K_F0000:
795 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000;
796 break;
797 case IA32_MTRR_FIX4K_F8000:
798 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000;
799 break;
800
801 case MSR_K6_EFER:
802 *puValue = pVCpu->cpum.s.Guest.msrEFER;
803 break;
804
805 case MSR_K8_SF_MASK:
806 *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
807 break;
808
809 case MSR_K6_STAR:
810 *puValue = pVCpu->cpum.s.Guest.msrSTAR;
811 break;
812
813 case MSR_K8_LSTAR:
814 *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
815 break;
816
817 case MSR_K8_CSTAR:
818 *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
819 break;
820
821 case MSR_K8_FS_BASE:
822 *puValue = pVCpu->cpum.s.Guest.fs.u64Base;
823 break;
824
825 case MSR_K8_GS_BASE:
826 *puValue = pVCpu->cpum.s.Guest.gs.u64Base;
827 break;
828
829 case MSR_K8_KERNEL_GS_BASE:
830 *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
831 break;
832
833 case MSR_K8_TSC_AUX:
834 *puValue = pVCpu->cpum.s.GuestMsrs.msr.TscAux;
835 break;
836
837 case MSR_IA32_PERF_STATUS:
838 /** @todo This may not be exactly correct; maybe use the host's values. */
839 *puValue = UINT64_C(1000) /* TSC increment by tick */
840 | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
841 | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
842 break;
843
844 case MSR_IA32_FSB_CLOCK_STS:
845 /*
846 * Encoded as:
847 * 0 - 266 MHz
848 * 1 - 133 MHz
849 * 2 - 200 MHz
850 * 3 - 166 MHz
851 * 5 - 100 MHz
852 */
853 *puValue = (2 << 4);
854 break;
855
856 case MSR_IA32_PLATFORM_INFO:
857 *puValue = (u8Multiplier << 8) /* Flex ratio max */
858 | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
859 break;
860
861 case MSR_IA32_THERM_STATUS:
862 /* CPU temperature relative to TCC; to actually activate this, CPUID leaf 6 EAX[0] must be set. */
863 *puValue = RT_BIT(31) /* validity bit */
864 | (UINT64_C(20) << 16) /* degrees till TCC */;
865 break;
866
867 case MSR_IA32_MISC_ENABLE:
868#if 0
869 /* Needs to be tested more before enabling. */
870 *puValue = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
871#else
872 /* Currently we don't allow guests to modify these enable MSRs. */
873 *puValue = MSR_IA32_MISC_ENABLE_FAST_STRINGS /* by default */;
874
875 if ((pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR) != 0)
876 *puValue |= MSR_IA32_MISC_ENABLE_MONITOR /* if mwait/monitor available */;
878 /** @todo: add more cpuid-controlled features this way. */
879#endif
880 break;
881
882#if 0 /*def IN_RING0 */
883 case MSR_IA32_PLATFORM_ID:
884 case MSR_IA32_BIOS_SIGN_ID:
885 if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
886 {
887 /* Available since the P6 family. VT-x implies that this feature is present. */
888 if (idMsr == MSR_IA32_PLATFORM_ID)
889 *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
890 else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
891 *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
892 break;
893 }
894 /* no break */
895#endif
896
897 default:
898 /* In the x2APIC specification this range is reserved for APIC control. */
899 if ( idMsr >= MSR_IA32_APIC_START
900 && idMsr < MSR_IA32_APIC_END)
901 {
902 rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
903 if (RT_SUCCESS(rc))
904 rc = VINF_SUCCESS;
905 else
906 {
907 *puValue = 0;
908 rc = VERR_CPUM_RAISE_GP_0;
909 }
910 }
911 else
912 {
913 *puValue = 0;
914 rc = VERR_CPUM_RAISE_GP_0;
915 }
916 break;
917 }
918
919 return rc;
920}
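/*
 * Illustrative sketch (not part of the original source): how an RDMSR
 * emulation path might use CPUMQueryGuestMsr, leaving #GP(0) injection to
 * the caller on failure.  The function name is a hypothetical placeholder.
 */
#if 0 /* example only */
static int cpumExampleEmulateRdMsr(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    uint64_t uValue;
    int rc = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue);
    if (rc == VERR_CPUM_RAISE_GP_0)
        return rc;                          /* caller injects #GP(0) into the guest */
    pCtx->eax = (uint32_t)uValue;           /* low half in EAX ... */
    pCtx->edx = (uint32_t)(uValue >> 32);   /* ... high half in EDX */
    return VINF_SUCCESS;
}
#endif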
921
922
923/**
924 * Sets the MSR.
925 *
926 * The caller is responsible for checking privilege if the call is the result
927 * of a WRMSR instruction. We'll do the rest.
928 *
929 * @retval VINF_SUCCESS on success.
930 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
931 * appropriate actions.
932 *
933 * @param pVCpu Pointer to the VMCPU.
934 * @param idMsr The MSR id.
935 * @param uValue The value to set.
936 *
937 * @remarks Everyone changing MSR values, including the recompiler, shall do it
938 * by calling this method. This makes sure we have current values and
939 * that we trigger all the right actions when something changes.
940 */
941VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
942{
943 /*
944 * If we don't indicate MSR support in the CPUID feature bits, indicate
945 * that a #GP(0) should be raised.
946 */
947 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
948 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
949
950 int rc = VINF_SUCCESS;
951 switch (idMsr)
952 {
953 case MSR_IA32_MISC_ENABLE:
954 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = uValue;
955 break;
956
957 case MSR_IA32_TSC:
958 TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
959 break;
960
961 case MSR_IA32_APICBASE:
962 rc = PDMApicSetBase(pVCpu->CTX_SUFF(pVM), uValue);
963 if (rc != VINF_SUCCESS)
964 rc = VERR_CPUM_RAISE_GP_0;
965 break;
966
967 case MSR_IA32_CR_PAT:
968 pVCpu->cpum.s.Guest.msrPAT = uValue;
969 break;
970
971 case MSR_IA32_SYSENTER_CS:
972 pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16 bits selector */
973 break;
974
975 case MSR_IA32_SYSENTER_EIP:
976 pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
977 break;
978
979 case MSR_IA32_SYSENTER_ESP:
980 pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
981 break;
982
983 case MSR_IA32_MTRR_CAP:
984 return VERR_CPUM_RAISE_GP_0;
985
986 case MSR_IA32_MTRR_DEF_TYPE:
987 if ( (uValue & UINT64_C(0xfffffffffffff300))
988 || ( (uValue & 0xff) != 0
989 && (uValue & 0xff) != 1
990 && (uValue & 0xff) != 4
991 && (uValue & 0xff) != 5
992 && (uValue & 0xff) != 6) )
993 {
994 Log(("MSR_IA32_MTRR_DEF_TYPE: #GP(0) - writing reserved value (%#llx)\n", uValue));
995 return VERR_CPUM_RAISE_GP_0;
996 }
997 pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue;
998 break;
999
1000 case IA32_MTRR_FIX64K_00000:
1001 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000 = uValue;
1002 break;
1003 case IA32_MTRR_FIX16K_80000:
1004 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000 = uValue;
1005 break;
1006 case IA32_MTRR_FIX16K_A0000:
1007 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000 = uValue;
1008 break;
1009 case IA32_MTRR_FIX4K_C0000:
1010 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000 = uValue;
1011 break;
1012 case IA32_MTRR_FIX4K_C8000:
1013 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000 = uValue;
1014 break;
1015 case IA32_MTRR_FIX4K_D0000:
1016 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000 = uValue;
1017 break;
1018 case IA32_MTRR_FIX4K_D8000:
1019 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000 = uValue;
1020 break;
1021 case IA32_MTRR_FIX4K_E0000:
1022 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000 = uValue;
1023 break;
1024 case IA32_MTRR_FIX4K_E8000:
1025 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000 = uValue;
1026 break;
1027 case IA32_MTRR_FIX4K_F0000:
1028 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000 = uValue;
1029 break;
1030 case IA32_MTRR_FIX4K_F8000:
1031 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000 = uValue;
1032 break;
1033
1034 case MSR_K6_EFER:
1035 {
1036 PVM pVM = pVCpu->CTX_SUFF(pVM);
1037 uint64_t const uOldEFER = pVCpu->cpum.s.Guest.msrEFER;
1038 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1039 ? pVM->cpum.s.aGuestCpuIdExt[1].edx
1040 : 0;
1041 uint64_t fMask = 0;
1042
1043 /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
1044 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)
1045 fMask |= MSR_K6_EFER_NXE;
1046 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
1047 fMask |= MSR_K6_EFER_LME;
1048 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
1049 fMask |= MSR_K6_EFER_SCE;
1050 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
1051 fMask |= MSR_K6_EFER_FFXSR;
1052
1053 /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
1054 paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
1055 if ( (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
1056 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
1057 {
1058 Log(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
1059 return VERR_CPUM_RAISE_GP_0;
1060 }
1061
1062 /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
1063 AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
1064 ("Unexpected value %RX64\n", uValue));
1065 pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);
1066
1067 /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
1068 if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
1069 if ( (uOldEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
1070 != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
1071 {
1072 /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
1073 HWACCMFlushTLB(pVCpu);
1074
1075 /* Notify PGM about NXE changes. */
1076 if ( (uOldEFER & MSR_K6_EFER_NXE)
1077 != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
1078 PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE));
1079 }
1080 break;
1081 }
1082
1083 case MSR_K8_SF_MASK:
1084 pVCpu->cpum.s.Guest.msrSFMASK = uValue;
1085 break;
1086
1087 case MSR_K6_STAR:
1088 pVCpu->cpum.s.Guest.msrSTAR = uValue;
1089 break;
1090
1091 case MSR_K8_LSTAR:
1092 pVCpu->cpum.s.Guest.msrLSTAR = uValue;
1093 break;
1094
1095 case MSR_K8_CSTAR:
1096 pVCpu->cpum.s.Guest.msrCSTAR = uValue;
1097 break;
1098
1099 case MSR_K8_FS_BASE:
1100 pVCpu->cpum.s.Guest.fs.u64Base = uValue;
1101 break;
1102
1103 case MSR_K8_GS_BASE:
1104 pVCpu->cpum.s.Guest.gs.u64Base = uValue;
1105 break;
1106
1107 case MSR_K8_KERNEL_GS_BASE:
1108 pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
1109 break;
1110
1111 case MSR_K8_TSC_AUX:
1112 pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
1113 break;
1114
1115 default:
1116 /* In the x2APIC specification this range is reserved for APIC control. */
1117 if ( idMsr >= MSR_IA32_APIC_START
1118 && idMsr < MSR_IA32_APIC_END)
1119 {
1120 rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
1121 if (rc != VINF_SUCCESS)
1122 rc = VERR_CPUM_RAISE_GP_0;
1123 }
1124 else
1125 {
1126 /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
1127 /** @todo rc = VERR_CPUM_RAISE_GP_0 */
1128 Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
1129 }
1130 break;
1131 }
1132 return rc;
1133}
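/*
 * Illustrative sketch (not part of the original source): the WRMSR
 * counterpart of the RDMSR example above.  EDX:EAX is combined into the
 * 64-bit value before calling CPUMSetGuestMsr, assuming the usual IPRT
 * RT_MAKE_U64(Lo, Hi) helper; the function name is hypothetical.
 */
#if 0 /* example only */
static int cpumExampleEmulateWrMsr(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    uint64_t uValue = RT_MAKE_U64(pCtx->eax, pCtx->edx); /* low, high */
    int rc = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue);
    if (rc == VERR_CPUM_RAISE_GP_0)
        return rc;                          /* caller injects #GP(0) into the guest */
    return VINF_SUCCESS;
}
#endif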
1134
1135
1136VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
1137{
1138 if (pcbLimit)
1139 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
1140 return pVCpu->cpum.s.Guest.idtr.pIdt;
1141}
1142
1143
1144VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
1145{
1146 if (pHidden)
1147 *pHidden = pVCpu->cpum.s.Guest.tr;
1148 return pVCpu->cpum.s.Guest.tr.Sel;
1149}
1150
1151
1152VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
1153{
1154 return pVCpu->cpum.s.Guest.cs.Sel;
1155}
1156
1157
1158VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
1159{
1160 return pVCpu->cpum.s.Guest.ds.Sel;
1161}
1162
1163
1164VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
1165{
1166 return pVCpu->cpum.s.Guest.es.Sel;
1167}
1168
1169
1170VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
1171{
1172 return pVCpu->cpum.s.Guest.fs.Sel;
1173}
1174
1175
1176VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
1177{
1178 return pVCpu->cpum.s.Guest.gs.Sel;
1179}
1180
1181
1182VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
1183{
1184 return pVCpu->cpum.s.Guest.ss.Sel;
1185}
1186
1187
1188VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
1189{
1190 return pVCpu->cpum.s.Guest.ldtr.Sel;
1191}
1192
1193
1194VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
1195{
1196 return pVCpu->cpum.s.Guest.cr0;
1197}
1198
1199
1200VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
1201{
1202 return pVCpu->cpum.s.Guest.cr2;
1203}
1204
1205
1206VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
1207{
1208 return pVCpu->cpum.s.Guest.cr3;
1209}
1210
1211
1212VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
1213{
1214 return pVCpu->cpum.s.Guest.cr4;
1215}
1216
1217
1218VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
1219{
1220 uint64_t u64;
1221 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1222 if (RT_FAILURE(rc))
1223 u64 = 0;
1224 return u64;
1225}
1226
1227
1228VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
1229{
1230 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1231}
1232
1233
1234VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
1235{
1236 return pVCpu->cpum.s.Guest.eip;
1237}
1238
1239
1240VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1241{
1242 return pVCpu->cpum.s.Guest.rip;
1243}
1244
1245
1246VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1247{
1248 return pVCpu->cpum.s.Guest.eax;
1249}
1250
1251
1252VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1253{
1254 return pVCpu->cpum.s.Guest.ebx;
1255}
1256
1257
1258VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1259{
1260 return pVCpu->cpum.s.Guest.ecx;
1261}
1262
1263
1264VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1265{
1266 return pVCpu->cpum.s.Guest.edx;
1267}
1268
1269
1270VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1271{
1272 return pVCpu->cpum.s.Guest.esi;
1273}
1274
1275
1276VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1277{
1278 return pVCpu->cpum.s.Guest.edi;
1279}
1280
1281
1282VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1283{
1284 return pVCpu->cpum.s.Guest.esp;
1285}
1286
1287
1288VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1289{
1290 return pVCpu->cpum.s.Guest.ebp;
1291}
1292
1293
1294VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1295{
1296 return pVCpu->cpum.s.Guest.eflags.u32;
1297}
1298
1299
1300VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1301{
1302 switch (iReg)
1303 {
1304 case DISCREG_CR0:
1305 *pValue = pVCpu->cpum.s.Guest.cr0;
1306 break;
1307
1308 case DISCREG_CR2:
1309 *pValue = pVCpu->cpum.s.Guest.cr2;
1310 break;
1311
1312 case DISCREG_CR3:
1313 *pValue = pVCpu->cpum.s.Guest.cr3;
1314 break;
1315
1316 case DISCREG_CR4:
1317 *pValue = pVCpu->cpum.s.Guest.cr4;
1318 break;
1319
1320 case DISCREG_CR8:
1321 {
1322 uint8_t u8Tpr;
1323 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /*pfPending*/);
1324 if (RT_FAILURE(rc))
1325 {
1326 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1327 *pValue = 0;
1328 return rc;
1329 }
1330 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes into CR8; bits 3-0 (the sub-class) are not represented in CR8. */
1331 break;
1332 }
1333
1334 default:
1335 return VERR_INVALID_PARAMETER;
1336 }
1337 return VINF_SUCCESS;
1338}
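/*
 * Illustrative sketch (not part of the original source): reading CR8 through
 * CPUMGetGuestCRx yields the task priority taken from the local APIC TPR,
 * i.e. TPR bits 7-4, as implemented in the DISCREG_CR8 case above.
 */
#if 0 /* example only */
static uint64_t cpumExampleReadCr8(PVMCPU pVCpu)
{
    uint64_t uCr8 = 0;
    if (RT_SUCCESS(CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &uCr8)))
        return uCr8;        /* 0..15, i.e. TPR >> 4 */
    return 0;               /* no APIC instance configured */
}
#endif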
1339
1340
1341VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1342{
1343 return pVCpu->cpum.s.Guest.dr[0];
1344}
1345
1346
1347VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1348{
1349 return pVCpu->cpum.s.Guest.dr[1];
1350}
1351
1352
1353VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1354{
1355 return pVCpu->cpum.s.Guest.dr[2];
1356}
1357
1358
1359VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1360{
1361 return pVCpu->cpum.s.Guest.dr[3];
1362}
1363
1364
1365VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1366{
1367 return pVCpu->cpum.s.Guest.dr[6];
1368}
1369
1370
1371VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1372{
1373 return pVCpu->cpum.s.Guest.dr[7];
1374}
1375
1376
1377VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1378{
1379 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1380 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1381 if (iReg == 4 || iReg == 5)
1382 iReg += 2;
1383 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1384 return VINF_SUCCESS;
1385}
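/*
 * Illustrative sketch (not part of the original source): because of the
 * aliasing handled above, querying DR4/DR5 through CPUMGetGuestDRx returns
 * the same values as DR6/DR7.
 */
#if 0 /* example only */
static void cpumExampleDrAlias(PVMCPU pVCpu)
{
    uint64_t uDr4, uDr6;
    CPUMGetGuestDRx(pVCpu, 4, &uDr4);   /* aliased ... */
    CPUMGetGuestDRx(pVCpu, 6, &uDr6);   /* ... to DR6 */
    Assert(uDr4 == uDr6);
}
#endif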
1386
1387
1388VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1389{
1390 return pVCpu->cpum.s.Guest.msrEFER;
1391}
1392
1393
1394/**
1395 * Gets a CPUID leaf.
1396 *
1397 * @param pVCpu Pointer to the VMCPU.
1398 * @param iLeaf The CPUID leaf to get.
1399 * @param pEax Where to store the EAX value.
1400 * @param pEbx Where to store the EBX value.
1401 * @param pEcx Where to store the ECX value.
1402 * @param pEdx Where to store the EDX value.
1403 */
1404VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1405{
1406 PVM pVM = pVCpu->CTX_SUFF(pVM);
1407
1408 PCCPUMCPUID pCpuId;
1409 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1410 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1411 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1412 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1413 else if ( iLeaf - UINT32_C(0x40000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdHyper)
1414 && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP))
1415 pCpuId = &pVM->cpum.s.aGuestCpuIdHyper[iLeaf - UINT32_C(0x40000000)]; /* Only report if HVP bit set. */
1416 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1417 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1418 else
1419 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1420
1421 uint32_t cCurrentCacheIndex = *pEcx;
1422
1423 *pEax = pCpuId->eax;
1424 *pEbx = pCpuId->ebx;
1425 *pEcx = pCpuId->ecx;
1426 *pEdx = pCpuId->edx;
1427
1428 if ( iLeaf == 1)
1429 {
1430 /* Bits 31-24: Initial APIC ID */
1431 Assert(pVCpu->idCpu <= 255);
1432 *pEbx |= (pVCpu->idCpu << 24);
1433 }
1434
1435 if ( iLeaf == 4
1436 && cCurrentCacheIndex < 3
1437 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1438 {
1439 uint32_t type, level, sharing, linesize,
1440 partitions, associativity, sets, cores;
1441
1442 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1443 partitions = 1;
1444 /* These are only here to silence the compiler; they always
1445 get overwritten below, and the compiler should be able to figure that out. */
1446 sets = associativity = sharing = level = 1;
1447 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1448 switch (cCurrentCacheIndex)
1449 {
1450 case 0:
1451 type = 1;
1452 level = 1;
1453 sharing = 1;
1454 linesize = 64;
1455 associativity = 8;
1456 sets = 64;
1457 break;
1458 case 1:
1459 level = 1;
1460 type = 2;
1461 sharing = 1;
1462 linesize = 64;
1463 associativity = 8;
1464 sets = 64;
1465 break;
1466 default: /* shut up gcc.*/
1467 AssertFailed();
1468 case 2:
1469 level = 2;
1470 type = 3;
1471 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1472 linesize = 64;
1473 associativity = 24;
1474 sets = 4096;
1475 break;
1476 }
1477
1478 *pEax |= ((cores - 1) << 26) |
1479 ((sharing - 1) << 14) |
1480 (level << 5) |
1481 1;
1482 *pEbx = (linesize - 1) |
1483 ((partitions - 1) << 12) |
1484 ((associativity - 1) << 22); /* -1 encoding */
1485 *pEcx = sets - 1;
1486 }
1487
1488 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1489}
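/*
 * Illustrative sketch (not part of the original source): decoding the
 * deterministic cache parameters (leaf 4) that the code above synthesizes
 * for Intel guests.  The field layout matches the "-1 encoding" comments;
 * the helper name is hypothetical.
 */
#if 0 /* example only */
static void cpumExampleDecodeCacheLeaf(PVMCPU pVCpu, uint32_t iIndex)
{
    uint32_t uEax, uEbx, uEcx, uEdx;
    uEcx = iIndex;                      /* ECX selects the cache index on input */
    CPUMGetGuestCpuId(pVCpu, 4, &uEax, &uEbx, &uEcx, &uEdx);

    uint32_t const cbLine         = (uEbx & 0xfff) + 1;
    uint32_t const cPartitions    = ((uEbx >> 12) & 0x3ff) + 1;
    uint32_t const cAssociativity = ((uEbx >> 22) & 0x3ff) + 1;
    uint32_t const cSets          = uEcx + 1;
    Log(("Cache index %u: %u bytes\n", iIndex, cbLine * cPartitions * cAssociativity * cSets));
}
#endif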
1490
1491/**
1492 * Gets a number of standard CPUID leafs.
1493 *
1494 * @returns Number of leafs.
1495 * @param pVM Pointer to the VM.
1496 * @remark Intended for PATM.
1497 */
1498VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1499{
1500 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1501}
1502
1503
1504/**
1505 * Gets a number of extended CPUID leafs.
1506 *
1507 * @returns Number of leafs.
1508 * @param pVM Pointer to the VM.
1509 * @remark Intended for PATM.
1510 */
1511VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1512{
1513 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1514}
1515
1516
1517/**
1518 * Gets a number of centaur CPUID leafs.
1519 *
1520 * @returns Number of leafs.
1521 * @param pVM Pointer to the VM.
1522 * @remark Intended for PATM.
1523 */
1524VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1525{
1526 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1527}
1528
1529
1530/**
1531 * Sets a CPUID feature bit.
1532 *
1533 * @param pVM Pointer to the VM.
1534 * @param enmFeature The feature to set.
1535 */
1536VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1537{
1538 switch (enmFeature)
1539 {
1540 /*
1541 * Set the APIC bit in both feature masks.
1542 */
1543 case CPUMCPUIDFEATURE_APIC:
1544 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1545 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1546 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1547 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1548 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1549 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1550 break;
1551
1552 /*
1553 * Set the x2APIC bit in the standard feature mask.
1554 */
1555 case CPUMCPUIDFEATURE_X2APIC:
1556 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1557 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1558 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1559 break;
1560
1561 /*
1562 * Set the sysenter/sysexit bit in the standard feature mask.
1563 * Assumes the caller knows what it's doing! (host must support these)
1564 */
1565 case CPUMCPUIDFEATURE_SEP:
1566 {
1567 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1568 {
1569 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1570 return;
1571 }
1572
1573 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1574 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1575 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1576 break;
1577 }
1578
1579 /*
1580 * Set the syscall/sysret bit in the extended feature mask.
1581 * Assumes the caller knows what it's doing! (host must support these)
1582 */
1583 case CPUMCPUIDFEATURE_SYSCALL:
1584 {
1585 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1586 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
1587 {
1588#if HC_ARCH_BITS == 32
1589 /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL does not appear to be reported in 32-bit mode,
1590 * even when the CPU is capable of it in 64-bit mode.
1591 */
1592 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1593 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
1594 || !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
1595#endif
1596 {
1597 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1598 return;
1599 }
1600 }
1601 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1602 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
1603 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1604 break;
1605 }
1606
1607 /*
1608 * Set the PAE bit in both feature masks.
1609 * Assumes the caller knows what it's doing! (host must support these)
1610 */
1611 case CPUMCPUIDFEATURE_PAE:
1612 {
1613 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1614 {
1615 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1616 return;
1617 }
1618
1619 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1620 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1621 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1622 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1623 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1624 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1625 break;
1626 }
1627
1628 /*
1629 * Set the LONG MODE bit in the extended feature mask.
1630 * Assumes the caller knows what it's doing! (host must support these)
1631 */
1632 case CPUMCPUIDFEATURE_LONG_MODE:
1633 {
1634 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1635 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
1636 {
1637 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1638 return;
1639 }
1640
1641 /* Valid for both Intel and AMD. */
1642 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1643 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1644 break;
1645 }
1646
1647 /*
1648 * Set the NX/XD bit in the extended feature mask.
1649 * Assumes the caller knows what it's doing! (host must support these)
1650 */
1651 case CPUMCPUIDFEATURE_NX:
1652 {
1653 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1654 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
1655 {
1656 LogRel(("WARNING: Can't turn on NX/XD when the host doesn't support it!!\n"));
1657 return;
1658 }
1659
1660 /* Valid for both Intel and AMD. */
1661 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX;
1662 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NX\n"));
1663 break;
1664 }
1665
1666 /*
1667 * Set the LAHF/SAHF support in 64-bit mode.
1668 * Assumes the caller knows what it's doing! (host must support this)
1669 */
1670 case CPUMCPUIDFEATURE_LAHF:
1671 {
1672 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1673 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
1674 {
1675 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1676 return;
1677 }
1678
1679 /* Valid for both Intel and AMD. */
1680 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1681 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1682 break;
1683 }
1684
1685 case CPUMCPUIDFEATURE_PAT:
1686 {
1687 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1688 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1689 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1690 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1691 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1692 LogRel(("CPUMClearGuestCpuIdFeature: Enabled PAT\n"));
1693 break;
1694 }
1695
1696 /*
1697 * Set the RDTSCP support bit.
1698 * Assumes the caller knows what it's doing! (host must support this)
1699 */
1700 case CPUMCPUIDFEATURE_RDTSCP:
1701 {
1702 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1703 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
1704 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1705 {
1706 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1707 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1708 return;
1709 }
1710
1711 /* Valid for both Intel and AMD. */
1712 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1713 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1714 break;
1715 }
1716
1717 /*
1718 * Set the Hypervisor Present bit in the standard feature mask.
1719 */
1720 case CPUMCPUIDFEATURE_HVP:
1721 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1722 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP;
1723 LogRel(("CPUMSetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
1724 break;
1725
1726 default:
1727 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1728 break;
1729 }
1730 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1731 {
1732 PVMCPU pVCpu = &pVM->aCpus[i];
1733 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1734 }
1735}
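/*
 * Illustrative sketch (not part of the original source): typical use of the
 * feature setters during VM construction, e.g. exposing NX and the
 * hypervisor-present bit to the guest.  Callers must ensure the host really
 * supports the hardware features they expose; the helper name is hypothetical.
 */
#if 0 /* example only */
static void cpumExampleExposeFeatures(PVM pVM)
{
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);
    /* Assuming the extended CPUID leaves are present, NX should now be reported. */
    Assert(CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX));
}
#endif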
1736
1737
1738/**
1739 * Queries a CPUID feature bit.
1740 *
1741 * @returns boolean for feature presence
1742 * @param pVM Pointer to the VM.
1743 * @param enmFeature The feature to query.
1744 */
1745VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1746{
1747 switch (enmFeature)
1748 {
1749 case CPUMCPUIDFEATURE_PAE:
1750 {
1751 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1752 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1753 break;
1754 }
1755
1756 case CPUMCPUIDFEATURE_NX:
1757 {
1758 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1759 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX);
1760 break;
 }
1761
1762 case CPUMCPUIDFEATURE_RDTSCP:
1763 {
1764 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1765 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
1766 break;
1767 }
1768
1769 case CPUMCPUIDFEATURE_LONG_MODE:
1770 {
1771 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1772 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
1773 break;
1774 }
1775
1776 default:
1777 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1778 break;
1779 }
1780 return false;
1781}
1782
1783
1784/**
1785 * Clears a CPUID feature bit.
1786 *
1787 * @param pVM Pointer to the VM.
1788 * @param enmFeature The feature to clear.
1789 */
1790VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1791{
1792 switch (enmFeature)
1793 {
1794 /*
1795 * Set the APIC bit in both feature masks.
1796 */
1797 case CPUMCPUIDFEATURE_APIC:
1798 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1799 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1800 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1801 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1802 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1803 Log(("CPUMSetGuestCpuIdFeature: Disabled APIC\n"));
1804 break;
1805
1806 /*
1807 * Clear the x2APIC bit in the standard feature mask.
1808 */
1809 case CPUMCPUIDFEATURE_X2APIC:
1810 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1811 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1812 LogRel(("CPUMSetGuestCpuIdFeature: Disabled x2APIC\n"));
1813 break;
1814
1815 case CPUMCPUIDFEATURE_PAE:
1816 {
1817 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1818 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1819 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1820 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1821 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1822 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1823 break;
1824 }
1825
1826 case CPUMCPUIDFEATURE_PAT:
1827 {
1828 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1829 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1830 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1831 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1832 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1833 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1834 break;
1835 }
1836
1837 case CPUMCPUIDFEATURE_LONG_MODE:
1838 {
1839 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1840 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1841 break;
1842 }
1843
1844 case CPUMCPUIDFEATURE_LAHF:
1845 {
1846 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1847 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1848 break;
1849 }
1850
1851 case CPUMCPUIDFEATURE_RDTSCP:
1852 {
1853 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1854 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1855 LogRel(("CPUMClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
1856 break;
1857 }
1858
1859 case CPUMCPUIDFEATURE_HVP:
1860 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1861 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP;
1862 break;
1863
1864 default:
1865 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1866 break;
1867 }
1868 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1869 {
1870 PVMCPU pVCpu = &pVM->aCpus[i];
1871 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1872 }
1873}
1874
1875
1876/**
1877 * Gets the host CPU vendor.
1878 *
1879 * @returns CPU vendor.
1880 * @param pVM Pointer to the VM.
1881 */
1882VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1883{
1884 return pVM->cpum.s.enmHostCpuVendor;
1885}
1886
1887
1888/**
1889 * Gets the CPU vendor.
1890 *
1891 * @returns CPU vendor.
1892 * @param pVM Pointer to the VM.
1893 */
1894VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1895{
1896 return pVM->cpum.s.enmGuestCpuVendor;
1897}
1898
1899
1900VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1901{
1902 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1903 return CPUMRecalcHyperDRx(pVCpu);
1904}
1905
1906
1907VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1908{
1909 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1910 return CPUMRecalcHyperDRx(pVCpu);
1911}
1912
1913
1914VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1915{
1916 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1917 return CPUMRecalcHyperDRx(pVCpu);
1918}
1919
1920
1921VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1922{
1923 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1924 return CPUMRecalcHyperDRx(pVCpu);
1925}
1926
1927
1928VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1929{
1930 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1931 return CPUMRecalcHyperDRx(pVCpu);
1932}
1933
1934
1935VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1936{
1937 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1938 return CPUMRecalcHyperDRx(pVCpu);
1939}
1940
1941
1942VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1943{
1944 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1945 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1946 if (iReg == 4 || iReg == 5)
1947 iReg += 2;
1948 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1949 return CPUMRecalcHyperDRx(pVCpu);
1950}
1951
1952
1953/**
1954 * Recalculates the hypervisor DRx register values based on
1955 * current guest registers and DBGF breakpoints.
1956 *
1957 * This is called whenever a guest DRx register is modified and when DBGF
1958 * sets a hardware breakpoint. In guest context this function will reload
1959 * any (hyper) DRx registers which comes out with a different value.
1960 *
1961 * @returns VINF_SUCCESS.
1962 * @param pVCpu Pointer to the VMCPU.
1963 */
1964VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
1965{
1966 PVM pVM = pVCpu->CTX_SUFF(pVM);
1967
1968 /*
1969 * Compare the DR7s first.
1970 *
1971 * We only care about the enabled flags. The GE and LE flags are always
1972 * set and we don't care if the guest doesn't set them. GD is virtualized
1973 * when we dispatch #DB; we never enable it.
1974 */
1975 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1976#ifdef CPUM_VIRTUALIZE_DRX
1977 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1978#else
1979 const RTGCUINTREG uGstDr7 = 0;
1980#endif
1981 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1982 {
1983 /*
1984 * Ok, something is enabled. Recalc each of the breakpoints.
1985 * Straightforward code, not optimized/minimized in any way.
1986 */
1987 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1988
1989 /* bp 0 */
1990 RTGCUINTREG uNewDr0;
1991 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1992 {
1993 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1994 uNewDr0 = DBGFBpGetDR0(pVM);
1995 }
1996 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1997 {
1998 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1999 uNewDr0 = CPUMGetGuestDR0(pVCpu);
2000 }
2001 else
2002 uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
2003
2004 /* bp 1 */
2005 RTGCUINTREG uNewDr1;
2006 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
2007 {
2008 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2009 uNewDr1 = DBGFBpGetDR1(pVM);
2010 }
2011 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
2012 {
2013 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2014 uNewDr1 = CPUMGetGuestDR1(pVCpu);
2015 }
2016 else
2017 uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
2018
2019 /* bp 2 */
2020 RTGCUINTREG uNewDr2;
2021 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
2022 {
2023 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2024 uNewDr2 = DBGFBpGetDR2(pVM);
2025 }
2026 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
2027 {
2028 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2029 uNewDr2 = CPUMGetGuestDR2(pVCpu);
2030 }
2031 else
2032 uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
2033
2034 /* bp 3 */
2035 RTGCUINTREG uNewDr3;
2036 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2037 {
2038 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2039 uNewDr3 = DBGFBpGetDR3(pVM);
2040 }
2041 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2042 {
2043 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2044 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2045 }
2046 else
2047 uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
2048
2049 /*
2050 * Apply the updates.
2051 */
2052#ifdef IN_RC
2053 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
2054 {
2055 /** @todo save host DBx registers. */
2056 }
2057#endif
2058 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
2059 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2060 CPUMSetHyperDR3(pVCpu, uNewDr3);
2061 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2062 CPUMSetHyperDR2(pVCpu, uNewDr2);
2063 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2064 CPUMSetHyperDR1(pVCpu, uNewDr1);
2065 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2066 CPUMSetHyperDR0(pVCpu, uNewDr0);
2067 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2068 CPUMSetHyperDR7(pVCpu, uNewDr7);
2069 }
2070 else
2071 {
2072#ifdef IN_RC
2073 if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
2074 {
2075 /** @todo restore host DBx registers. */
2076 }
2077#endif
2078 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2079 }
2080 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2081 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2082 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2083 pVCpu->cpum.s.Hyper.dr[7]));
2084
2085 return VINF_SUCCESS;
2086}
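
/*
 * Illustrative aside: the four per-breakpoint blocks above are intentionally
 * spelled out, but the same merge could be expressed as a table-driven loop.
 * This is only a hedged sketch, not the code in use: the two mask tables and
 * the per-index getters DBGFBpGetDRx()/CPUMGetGuestDRx() are assumptions made
 * up for the example, while uDbgfDr7, uGstDr7, uNewDr7 and the X86_DR7_*
 * constants come from the function above.
 *
 * @code
 *  static const RTGCUINTREG s_aEnable[4] =
 *  {
 *      X86_DR7_L0 | X86_DR7_G0, X86_DR7_L1 | X86_DR7_G1,
 *      X86_DR7_L2 | X86_DR7_G2, X86_DR7_L3 | X86_DR7_G3
 *  };
 *  static const RTGCUINTREG s_aCfg[4] =
 *  {
 *      X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK, X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK,
 *      X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK, X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK
 *  };
 *  RTGCUINTREG aNewDr[4];
 *  for (unsigned iBp = 0; iBp < 4; iBp++)
 *  {
 *      if (uDbgfDr7 & s_aEnable[iBp])
 *      {
 *          uNewDr7    |= uDbgfDr7 & (s_aEnable[iBp] | s_aCfg[iBp]);
 *          aNewDr[iBp] = DBGFBpGetDRx(pVM, iBp);       // hypothetical per-index getter
 *      }
 *      else if (uGstDr7 & s_aEnable[iBp])
 *      {
 *          uNewDr7    |= uGstDr7 & (s_aEnable[iBp] | s_aCfg[iBp]);
 *          aNewDr[iBp] = CPUMGetGuestDRx(pVCpu, iBp);  // hypothetical per-index getter
 *      }
 *      else
 *          aNewDr[iBp] = pVCpu->cpum.s.Hyper.dr[iBp];
 *  }
 * @endcode
 */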
2087
2088
2089/**
2090 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2091 *
2092 * @returns true if NXE is enabled, otherwise false.
2093 * @param pVCpu Pointer to the VMCPU.
2094 */
2095VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2096{
2097 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2098}
2099
2100
2101/**
2102 * Tests if the guest has the Page Size Extension enabled (PSE).
2103 *
2104 * @returns true if PSE is enabled (or implied by PAE), otherwise false.
2105 * @param pVCpu Pointer to the VMCPU.
2106 */
2107VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2108{
2109 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2110 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2111}
2112
2113
2114/**
2115 * Tests if the guest has paging enabled (PG).
2116 *
2117 * @returns true if paging is enabled, otherwise false.
2118 * @param pVCpu Pointer to the VMCPU.
2119 */
2120VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2121{
2122 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2123}
2124
2125
2126/**
2127 * Tests if the guest has write protection for supervisor-mode accesses enabled (CR0.WP).
2128 *
2129 * @returns true if write protection is enabled, otherwise false.
2130 * @param pVCpu Pointer to the VMCPU.
2131 */
2132VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2133{
2134 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2135}
2136
2137
2138/**
2139 * Tests if the guest is running in real mode or not.
2140 *
2141 * @returns true if in real mode, otherwise false.
2142 * @param pVCpu Pointer to the VMCPU.
2143 */
2144VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2145{
2146 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2147}
2148
2149
2150/**
2151 * Tests if the guest is running in real or virtual 8086 mode.
2152 *
2153 * @returns @c true if it is, @c false if not.
2154 * @param pVCpu Pointer to the VMCPU.
2155 */
2156VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2157{
2158 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2159 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2160}
2161
2162
2163/**
2164 * Tests if the guest is running in protected mode or not.
2165 *
2166 * @returns true if in protected mode, otherwise false.
2167 * @param pVCpu Pointer to the VMCPU.
2168 */
2169VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2170{
2171 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2172}
2173
2174
2175/**
2176 * Tests if the guest is running in paged protected mode or not.
2177 *
2178 * @returns true if in paged protected mode, otherwise false.
2179 * @param pVCpu Pointer to the VMCPU.
2180 */
2181VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2182{
2183 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2184}
2185
2186
2187/**
2188 * Tests if the guest is running in long mode or not.
2189 *
2190 * @returns true if in long mode, otherwise false.
2191 * @param pVCpu Pointer to the VMCPU.
2192 */
2193VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2194{
2195 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2196}
2197
2198
2199/**
2200 * Tests if the guest is running in PAE mode or not.
2201 *
2202 * @returns true if in PAE mode, otherwise false.
2203 * @param pVCpu Pointer to the VMCPU.
2204 */
2205VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2206{
2207 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2208 && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
2209 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2210}
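
/*
 * Taken together, the predicates above partition the guest's paging setup:
 * CR0.PG clear means no paging, EFER.LMA selects long mode (which always uses
 * PAE-style 4-level tables), and otherwise CR4.PAE distinguishes PAE from
 * legacy 32-bit paging.  The real classification lives in PGM; the sketch
 * below merely illustrates how a caller might branch on these getters, and
 * the enmMyPagingMode variable and MY_PAGING_* values are assumptions made up
 * for the example.
 *
 * @code
 *  if (!CPUMIsGuestPagingEnabled(pVCpu))
 *      enmMyPagingMode = MY_PAGING_NONE;
 *  else if (CPUMIsGuestInLongMode(pVCpu))
 *      enmMyPagingMode = MY_PAGING_AMD64;
 *  else if (CPUMIsGuestInPAEMode(pVCpu))
 *      enmMyPagingMode = MY_PAGING_PAE;
 *  else
 *      enmMyPagingMode = MY_PAGING_32BIT;
 * @endcode
 */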
2211
2212
2213#ifndef IN_RING0
2214/**
2215 * Updates the EFLAGS while we're in raw-mode.
2216 *
2217 * @param pVCpu Pointer to the VMCPU.
2218 * @param fEfl The new EFLAGS value.
2219 */
2220VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2221{
2222 if (!pVCpu->cpum.s.fRawEntered)
2223 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2224 else
2225 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest), fEfl);
2226}
2227#endif /* !IN_RING0 */
2228
2229
2230/**
2231 * Gets the EFLAGS while we're in raw-mode.
2232 *
2233 * @returns The eflags.
2234 * @param pVCpu Pointer to the current virtual CPU.
2235 */
2236VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2237{
2238#ifdef IN_RING0
2239 return pVCpu->cpum.s.Guest.eflags.u32;
2240#else
2241
2242 if (!pVCpu->cpum.s.fRawEntered)
2243 return pVCpu->cpum.s.Guest.eflags.u32;
2244 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest));
2245#endif
2246}
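
/*
 * Once raw-mode execution has been entered (fRawEntered), PATM owns the
 * authoritative IF/IOPL state, so callers must go through these accessors
 * rather than poking pVCpu->cpum.s.Guest.eflags directly.  A minimal usage
 * sketch; the surrounding logic is an assumption made up for the example:
 *
 * @code
 *  uint32_t fEfl = CPUMRawGetEFlags(pVCpu);
 *  if (fEfl & X86_EFL_IF)                          // would the guest take an interrupt?
 *      CPUMRawSetEFlags(pVCpu, fEfl | X86_EFL_TF); // e.g. arm single stepping
 * @endcode
 */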
2247
2248
2249/**
2250 * Sets the specified changed flags (CPUM_CHANGED_*).
2251 *
2252 * @param pVCpu Pointer to the current virtual CPU.
 * @param fChangedFlags The changed flags (CPUM_CHANGED_*) to set.
2253 */
2254VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2255{
2256 pVCpu->cpum.s.fChanged |= fChangedFlags;
2257}
2258
2259
2260/**
2261 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
2262 * @returns true if supported.
2263 * @returns false if not supported.
2264 * @param pVM Pointer to the VM.
2265 */
2266VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2267{
2268 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2269}
2270
2271
2272/**
2273 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2274 * @returns true if used.
2275 * @returns false if not used.
2276 * @param pVM Pointer to the VM.
2277 */
2278VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2279{
2280 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
2281}
2282
2283
2284/**
2285 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2286 * @returns true if used.
2287 * @returns false if not used.
2288 * @param pVM Pointer to the VM.
2289 */
2290VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2291{
2292 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
2293}
2294
2295#ifndef IN_RING3
2296
2297/**
2298 * Lazily sync in the FPU/XMM state.
2299 *
2300 * @returns VBox status code.
2301 * @param pVCpu Pointer to the VMCPU.
2302 */
2303VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2304{
2305 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2306}
2307
2308#endif /* !IN_RING3 */
2309
2310/**
2311 * Checks if we activated the FPU/XMM state of the guest OS.
2312 * @returns true if we did.
2313 * @returns false if not.
2314 * @param pVCpu Pointer to the VMCPU.
2315 */
2316VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2317{
2318 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2319}
2320
2321
2322/**
2323 * Deactivate the FPU/XMM state of the guest OS.
2324 * @param pVCpu Pointer to the VMCPU.
2325 */
2326VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2327{
2328 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2329}
2330
2331
2332/**
2333 * Checks if the guest debug state is active.
2334 *
2335 * @returns boolean
2336 * @param pVCpu Pointer to the VMCPU.
2337 */
2338VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2339{
2340 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2341}
2342
2343/**
2344 * Checks if the hyper debug state is active.
2345 *
2346 * @returns boolean
2347 * @param pVCpu Pointer to the VMCPU.
2348 */
2349VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2350{
2351 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
2352}
2353
2354
2355/**
2356 * Mark the guest's debug state as inactive.
2357 *
2358 * @param pVCpu Pointer to the VMCPU.
2360 */
2361VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2362{
2363 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2364}
2365
2366
2367/**
2368 * Mark the hypervisor's debug state as inactive.
2369 *
2370 * @param pVCpu Pointer to the VMCPU.
2372 */
2373VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
2374{
2375 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2376}
2377
2378/**
2379 * Checks if the hidden selector registers are valid for the specified CPU.
2380 *
2381 * @returns true if they are.
2382 * @returns false if not.
2383 * @param pVCpu Pointer to the VMCPU.
2384 */
2385VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVMCPU pVCpu)
2386{
2387 bool const fRc = !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID);
2388 Assert(fRc || !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)));
2389 Assert(!pVCpu->cpum.s.fRemEntered);
2390 return fRc;
2391}
2392
2393
2394
2395/**
2396 * Get the current privilege level of the guest.
2397 *
2398 * @returns CPL
2399 * @param pVCpu Pointer to the current virtual CPU.
2400 */
2401VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2402{
2403 uint32_t uCpl;
2404
2405 if (CPUMAreHiddenSelRegsValid(pVCpu))
2406 {
2407 /*
2408 * CPL can reliably be found in SS.DPL.
2409 *
2410 * Note! We used to check CS.DPL here, assuming it was always equal to
2411 * CPL even if a conforming segment was loaded. But this turned out to
2412 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2413 * during install after a far call to ring 2. Then on newer
2414 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2415 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2416 *
2417 * So, forget CS.DPL, always use SS.DPL.
2418 */
2419 if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2420 {
2421 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2422 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2423 else
2424 uCpl = 3; /* REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2425 }
2426 else
2427 uCpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2428 }
2429 else if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2430 {
2431 if (RT_LIKELY(!pVCpu->cpum.s.Guest.eflags.Bits.u1VM))
2432 {
2433 /*
2434 * The SS RPL is always equal to the CPL, while the CS RPL
2435 * isn't necessarily equal if the segment is conforming.
2436 * See section 4.11.1 in the AMD manual.
2437 */
2438 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2439#ifndef IN_RING0
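/* Raw-mode (ring compression) runs guest ring-0 code in ring 1, so an SS RPL
   of 1 here presumably corresponds to guest CPL 0. */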
2440 if (uCpl == 1)
2441 uCpl = 0;
2442#endif
2443 }
2444 else
2445 uCpl = 3;
2446 }
2447 else
2448 uCpl = 0; /* real mode; CPL is zero */
2449
2450 return uCpl;
2451}
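
/*
 * Callers typically only need to distinguish supervisor from user execution,
 * e.g. when qualifying a memory access.  A minimal usage sketch; the local
 * flags are assumptions made up for the example:
 *
 * @code
 *  uint32_t const uCpl        = CPUMGetGuestCPL(pVCpu);
 *  bool const     fUserMode   = uCpl == 3;
 *  bool const     fSupervisor = uCpl == 0;
 * @endcode
 */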
2452
2453
2454/**
2455 * Gets the current guest CPU mode.
2456 *
2457 * If paging mode is what you need, check out PGMGetGuestMode().
2458 *
2459 * @returns The CPU mode.
2460 * @param pVCpu Pointer to the VMCPU.
2461 */
2462VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2463{
2464 CPUMMODE enmMode;
2465 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2466 enmMode = CPUMMODE_REAL;
2467 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2468 enmMode = CPUMMODE_PROTECTED;
2469 else
2470 enmMode = CPUMMODE_LONG;
2471
2472 return enmMode;
2473}
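
/*
 * Example of consuming the returned mode; purely illustrative (the log strings
 * are made up here), but CPUMMODE_REAL/PROTECTED/LONG are the values the
 * function above produces.
 *
 * @code
 *  switch (CPUMGetGuestMode(pVCpu))
 *  {
 *      case CPUMMODE_REAL:      Log(("guest in real mode\n"));      break;
 *      case CPUMMODE_PROTECTED: Log(("guest in protected mode\n")); break;
 *      case CPUMMODE_LONG:      Log(("guest in long mode\n"));      break;
 *      default:                 AssertFailed();                     break;
 *  }
 * @endcode
 */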