VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 40210

Last change on this file since 40210 was 40170, checked in by vboxsync, 13 years ago

MSRs and MTRRs, CPUM saved state changed. (linux 2.4.31 seems to ignore the capabilities when it comes to fixed MTRRs.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 69.5 KB
1/* $Id: CPUMAllRegs.cpp 40170 2012-02-17 14:22:26Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include "CPUMInternal.h"
30#include <VBox/vmm/vm.h>
31#include <VBox/err.h>
32#include <VBox/dis.h>
33#include <VBox/log.h>
34#include <VBox/vmm/hwaccm.h>
35#include <VBox/vmm/tm.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-amd64-x86.h>
39#ifdef IN_RING3
40#include <iprt/thread.h>
41#endif
42
43/** Disable stack frame pointer generation here. */
44#if defined(_MSC_VER) && !defined(DEBUG)
45# pragma optimize("y", off)
46#endif
47
48
49/**
50 * Sets or resets an alternative hypervisor context core.
51 *
52 * This is called when we get a hypervisor trap and need to switch the context
53 * core to the trap frame on the stack. It is called again to reset
54 * back to the default context core when resuming hypervisor execution.
55 *
56 * @param pVCpu The VMCPU handle.
57 * @param pCtxCore Pointer to the alternative context core or NULL
58 * to go back to the default context core.
59 */
60VMMDECL(void) CPUMHyperSetCtxCore(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
61{
62 PVM pVM = pVCpu->CTX_SUFF(pVM);
63
64 LogFlow(("CPUMHyperSetCtxCore: %p/%p/%p -> %p\n", pVCpu->cpum.s.CTX_SUFF(pHyperCore), pCtxCore));
65 if (!pCtxCore)
66 {
67 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
68 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
69 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
70 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))VM_RC_ADDR(pVM, pCtxCore);
71 }
72 else
73 {
74 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
75 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
76 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
77 }
78}
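/*
 * Illustrative, non-authoritative sketch of the calling pattern described
 * above: switch to the trap frame's context core while handling a hypervisor
 * trap, then pass NULL to go back to the default core. The handler shape and
 * name are made up for the example.
 */
#if 0 /* sketch only, not part of the build */
static void hypotheticalHyperTrapHandler(PVMCPU pVCpu, PCPUMCTXCORE pTrapFrameCore)
{
    CPUMHyperSetCtxCore(pVCpu, pTrapFrameCore);  /* use the trap frame as the hyper core */
    /* ... inspect or adjust the hypervisor register state here ... */
    CPUMHyperSetCtxCore(pVCpu, NULL);            /* back to the default &Hyper core */
}
#endif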
79
80
81/**
82 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
83 * This is only for reading in order to save a few calls.
84 *
85 * @param pVCpu The VMCPU handle.
86 */
87VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
88{
89 return pVCpu->cpum.s.CTX_SUFF(pHyperCore);
90}
91
92
93/**
94 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
95 *
96 * @returns VBox status code.
97 * @param pVCpu The VMCPU handle.
98 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
99 *
100 * @deprecated This will *not* (and never has) give the right picture of the
101 * hypervisor register state. With CPUMHyperSetCtxCore() this is
102 * getting much worse. So, use the individual functions for getting
103 * and esp. setting the hypervisor registers.
104 */
105VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx)
106{
107 *ppCtx = &pVCpu->cpum.s.Hyper;
108 return VINF_SUCCESS;
109}
110
111
112VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
113{
114 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
115 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
116 pVCpu->cpum.s.Hyper.gdtrPadding = 0;
117}
118
119
120VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
121{
122 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
123 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
124 pVCpu->cpum.s.Hyper.idtrPadding = 0;
125}
126
127
128VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
129{
130 pVCpu->cpum.s.Hyper.cr3 = cr3;
131
132#ifdef IN_RC
133 /* Update the current CR3. */
134 ASMSetCR3(cr3);
135#endif
136}
137
138VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
139{
140 return pVCpu->cpum.s.Hyper.cr3;
141}
142
143
144VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
145{
146 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs = SelCS;
147}
148
149
150VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
151{
152 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds = SelDS;
153}
154
155
156VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
157{
158 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es = SelES;
159}
160
161
162VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
163{
164 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs = SelFS;
165}
166
167
168VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
169{
170 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs = SelGS;
171}
172
173
174VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
175{
176 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss = SelSS;
177}
178
179
180VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
181{
182 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp = u32ESP;
183}
184
185
186VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
187{
188 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32 = Efl;
189 return VINF_SUCCESS;
190}
191
192
193VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
194{
195 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip = u32EIP;
196}
197
198
199VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
200{
201 pVCpu->cpum.s.Hyper.tr = SelTR;
202}
203
204
205VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
206{
207 pVCpu->cpum.s.Hyper.ldtr = SelLDTR;
208}
209
210
211VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
212{
213 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
214 /** @todo in GC we must load it! */
215}
216
217
218VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
219{
220 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
221 /** @todo in GC we must load it! */
222}
223
224
225VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
226{
227 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
228 /** @todo in GC we must load it! */
229}
230
231
232VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
233{
234 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
235 /** @todo in GC we must load it! */
236}
237
238
239VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
240{
241 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
242 /** @todo in GC we must load it! */
243}
244
245
246VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
247{
248 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
249 /** @todo in GC we must load it! */
250}
251
252
253VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
254{
255 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs;
256}
257
258
259VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
260{
261 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds;
262}
263
264
265VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
266{
267 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es;
268}
269
270
271VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
272{
273 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs;
274}
275
276
277VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
278{
279 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs;
280}
281
282
283VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
284{
285 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss;
286}
287
288
289VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
290{
291 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eax;
292}
293
294
295VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
296{
297 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebx;
298}
299
300
301VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
302{
303 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ecx;
304}
305
306
307VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
308{
309 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edx;
310}
311
312
313VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
314{
315 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esi;
316}
317
318
319VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
320{
321 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edi;
322}
323
324
325VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
326{
327 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebp;
328}
329
330
331VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
332{
333 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp;
334}
335
336
337VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
338{
339 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32;
340}
341
342
343VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
344{
345 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip;
346}
347
348
349VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
350{
351 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->rip;
352}
353
354
355VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
356{
357 if (pcbLimit)
358 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
359 return pVCpu->cpum.s.Hyper.idtr.pIdt;
360}
361
362
363VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
364{
365 if (pcbLimit)
366 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
367 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
368}
369
370
371VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
372{
373 return pVCpu->cpum.s.Hyper.ldtr;
374}
375
376
377VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
378{
379 return pVCpu->cpum.s.Hyper.dr[0];
380}
381
382
383VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
384{
385 return pVCpu->cpum.s.Hyper.dr[1];
386}
387
388
389VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
390{
391 return pVCpu->cpum.s.Hyper.dr[2];
392}
393
394
395VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
396{
397 return pVCpu->cpum.s.Hyper.dr[3];
398}
399
400
401VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
402{
403 return pVCpu->cpum.s.Hyper.dr[6];
404}
405
406
407VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
408{
409 return pVCpu->cpum.s.Hyper.dr[7];
410}
411
412
413/**
414 * Gets the pointer to the internal CPUMCTXCORE structure.
415 * This is only for reading in order to save a few calls.
416 *
417 * @param pVCpu Handle to the virtual cpu.
418 */
419VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
420{
421 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
422}
423
424
425/**
426 * Sets the guest context core registers.
427 *
428 * @param pVCpu Handle to the virtual cpu.
429 * @param pCtxCore The new context core values.
430 */
431VMMDECL(void) CPUMSetGuestCtxCore(PVMCPU pVCpu, PCCPUMCTXCORE pCtxCore)
432{
433 /** @todo #1410 requires selectors to be checked. (huh? 1410?) */
434
435 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
436 *pCtxCoreDst = *pCtxCore;
437
438 /* Mask away invalid parts of the cpu context. */
439 if (!CPUMIsGuestInLongMode(pVCpu))
440 {
441 uint64_t u64Mask = UINT64_C(0xffffffff);
442
443 pCtxCoreDst->rip &= u64Mask;
444 pCtxCoreDst->rax &= u64Mask;
445 pCtxCoreDst->rbx &= u64Mask;
446 pCtxCoreDst->rcx &= u64Mask;
447 pCtxCoreDst->rdx &= u64Mask;
448 pCtxCoreDst->rsi &= u64Mask;
449 pCtxCoreDst->rdi &= u64Mask;
450 pCtxCoreDst->rbp &= u64Mask;
451 pCtxCoreDst->rsp &= u64Mask;
452 pCtxCoreDst->rflags.u &= u64Mask;
453
454 pCtxCoreDst->r8 = 0;
455 pCtxCoreDst->r9 = 0;
456 pCtxCoreDst->r10 = 0;
457 pCtxCoreDst->r11 = 0;
458 pCtxCoreDst->r12 = 0;
459 pCtxCoreDst->r13 = 0;
460 pCtxCoreDst->r14 = 0;
461 pCtxCoreDst->r15 = 0;
462 }
463}
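/*
 * Illustrative, non-authoritative sketch of what the masking above means for
 * a caller: outside long mode only the low 32 bits of the GPRs/RIP survive
 * and R8-R15 are cleared. The helper name and values are made up.
 */
#if 0 /* sketch only, not part of the build */
static void hypotheticalShowCtxCoreMasking(PVMCPU pVCpu)
{
    CPUMCTXCORE Core = *CPUMGetGuestCtxCore(pVCpu);
    Core.rip = UINT64_C(0x00000001deadbeef);
    Core.r8  = UINT64_C(0x1234567812345678);
    CPUMSetGuestCtxCore(pVCpu, &Core);
    /* When the guest is not in long mode, the stored RIP is now 0xdeadbeef
       (CPUMGetGuestEIP() returns that) and the stored R8 is 0. */
}
#endif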
464
465
466/**
467 * Queries the pointer to the internal CPUMCTX structure
468 *
469 * @returns The CPUMCTX pointer.
470 * @param pVCpu Handle to the virtual cpu.
471 */
472VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
473{
474 return &pVCpu->cpum.s.Guest;
475}
476
477VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
478{
479 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
480 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
481 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
482 return VINF_SUCCESS;
483}
484
485VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
486{
487 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
488 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
489 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
490 return VINF_SUCCESS;
491}
492
493VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
494{
495 pVCpu->cpum.s.Guest.tr = tr;
496 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
497 return VINF_SUCCESS;
498}
499
500VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
501{
502 pVCpu->cpum.s.Guest.ldtr = ldtr;
503 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
504 return VINF_SUCCESS;
505}
506
507
508/**
509 * Set the guest CR0.
510 *
511 * When called in GC, the hyper CR0 may be updated if that is
512 * required. The caller only has to take special action if AM,
513 * WP, PG or PE changes.
514 *
515 * @returns VINF_SUCCESS (consider it void).
516 * @param pVCpu Handle to the virtual cpu.
517 * @param cr0 The new CR0 value.
518 */
519VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
520{
521#ifdef IN_RC
522 /*
523 * Check if we need to change hypervisor CR0 because
524 * of math stuff.
525 */
526 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
527 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
528 {
529 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
530 {
531 /*
532 * We haven't saved the host FPU state yet, so TS and MT are both set
533 * and EM should be reflecting the guest EM (it always does this).
534 */
535 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
536 {
537 uint32_t HyperCR0 = ASMGetCR0();
538 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
539 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
540 HyperCR0 &= ~X86_CR0_EM;
541 HyperCR0 |= cr0 & X86_CR0_EM;
542 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
543 ASMSetCR0(HyperCR0);
544 }
545# ifdef VBOX_STRICT
546 else
547 {
548 uint32_t HyperCR0 = ASMGetCR0();
549 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
550 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
551 }
552# endif
553 }
554 else
555 {
556 /*
557 * Already saved the state, so we're just mirroring
558 * the guest flags.
559 */
560 uint32_t HyperCR0 = ASMGetCR0();
561 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
562 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
563 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
564 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
565 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
566 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
567 ASMSetCR0(HyperCR0);
568 }
569 }
570#endif /* IN_RC */
571
572 /*
573 * Check for changes causing TLB flushes (for REM).
574 * The caller is responsible for calling PGM when appropriate.
575 */
576 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
577 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
578 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
579 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
580
581 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
582 return VINF_SUCCESS;
583}
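/*
 * Illustrative, non-authoritative sketch of a MOV-to-CR0 emulation path as
 * implied by the comment above: CPUM records the change, but the caller still
 * has to act on AM/WP/PG/PE transitions (e.g. tell PGM). The helper name is
 * made up and the PGM step is only indicated.
 */
#if 0 /* sketch only, not part of the build */
static int hypotheticalEmulateMovToCr0(PVMCPU pVCpu, uint64_t uNewCr0)
{
    uint64_t const uOldCr0 = CPUMGetGuestCR0(pVCpu);
    int rc = CPUMSetGuestCR0(pVCpu, uNewCr0);
    AssertRC(rc);
    if ((uOldCr0 ^ uNewCr0) & (X86_CR0_AM | X86_CR0_WP | X86_CR0_PG | X86_CR0_PE))
    {
        /* ... caller's responsibility: switch paging mode / notify PGM ... */
    }
    return rc;
}
#endif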
584
585
586VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
587{
588 pVCpu->cpum.s.Guest.cr2 = cr2;
589 return VINF_SUCCESS;
590}
591
592
593VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
594{
595 pVCpu->cpum.s.Guest.cr3 = cr3;
596 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
597 return VINF_SUCCESS;
598}
599
600
601VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
602{
603 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
604 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
605 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
606 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
607 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
608 cr4 &= ~X86_CR4_OSFSXR;
609 pVCpu->cpum.s.Guest.cr4 = cr4;
610 return VINF_SUCCESS;
611}
612
613
614VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
615{
616 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
617 return VINF_SUCCESS;
618}
619
620
621VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
622{
623 pVCpu->cpum.s.Guest.eip = eip;
624 return VINF_SUCCESS;
625}
626
627
628VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
629{
630 pVCpu->cpum.s.Guest.eax = eax;
631 return VINF_SUCCESS;
632}
633
634
635VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
636{
637 pVCpu->cpum.s.Guest.ebx = ebx;
638 return VINF_SUCCESS;
639}
640
641
642VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
643{
644 pVCpu->cpum.s.Guest.ecx = ecx;
645 return VINF_SUCCESS;
646}
647
648
649VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
650{
651 pVCpu->cpum.s.Guest.edx = edx;
652 return VINF_SUCCESS;
653}
654
655
656VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
657{
658 pVCpu->cpum.s.Guest.esp = esp;
659 return VINF_SUCCESS;
660}
661
662
663VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
664{
665 pVCpu->cpum.s.Guest.ebp = ebp;
666 return VINF_SUCCESS;
667}
668
669
670VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
671{
672 pVCpu->cpum.s.Guest.esi = esi;
673 return VINF_SUCCESS;
674}
675
676
677VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
678{
679 pVCpu->cpum.s.Guest.edi = edi;
680 return VINF_SUCCESS;
681}
682
683
684VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
685{
686 pVCpu->cpum.s.Guest.ss = ss;
687 return VINF_SUCCESS;
688}
689
690
691VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
692{
693 pVCpu->cpum.s.Guest.cs = cs;
694 return VINF_SUCCESS;
695}
696
697
698VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
699{
700 pVCpu->cpum.s.Guest.ds = ds;
701 return VINF_SUCCESS;
702}
703
704
705VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
706{
707 pVCpu->cpum.s.Guest.es = es;
708 return VINF_SUCCESS;
709}
710
711
712VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
713{
714 pVCpu->cpum.s.Guest.fs = fs;
715 return VINF_SUCCESS;
716}
717
718
719VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
720{
721 pVCpu->cpum.s.Guest.gs = gs;
722 return VINF_SUCCESS;
723}
724
725
726VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
727{
728 pVCpu->cpum.s.Guest.msrEFER = val;
729}
730
731
732/**
733 * Query an MSR.
734 *
735 * The caller is responsible for checking privilege if the call is the result
736 * of a RDMSR instruction. We'll do the rest.
737 *
738 * @retval VINF_SUCCESS on success.
739 * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
740 * expected to take the appropriate actions. @a *puValue is set to 0.
741 * @param pVCpu The virtual CPU to operate on.
742 * @param idMsr The MSR.
743 * @param puValue Where to return the value.
744 *
745 * @remarks This will always return the right values, even when we're in the
746 * recompiler.
747 */
748VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
749{
750 /*
751 * If we don't indicate MSR support in the CPUID feature bits, indicate
752 * that a #GP(0) should be raised.
753 */
754 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
755 {
756 *puValue = 0;
757 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
758 }
759
760 int rc = VINF_SUCCESS;
761 uint8_t const u8Multiplier = 4;
762 switch (idMsr)
763 {
764 case MSR_IA32_TSC:
765 *puValue = TMCpuTickGet(pVCpu);
766 break;
767
768 case MSR_IA32_APICBASE:
769 rc = PDMApicGetBase(pVCpu->CTX_SUFF(pVM), puValue);
770 if (RT_SUCCESS(rc))
771 rc = VINF_SUCCESS;
772 else
773 {
774 *puValue = 0;
775 rc = VERR_CPUM_RAISE_GP_0;
776 }
777 break;
778
779 case MSR_IA32_CR_PAT:
780 *puValue = pVCpu->cpum.s.Guest.msrPAT;
781 break;
782
783 case MSR_IA32_SYSENTER_CS:
784 *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
785 break;
786
787 case MSR_IA32_SYSENTER_EIP:
788 *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
789 break;
790
791 case MSR_IA32_SYSENTER_ESP:
792 *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
793 break;
794
795 case MSR_IA32_MTRR_CAP:
796 {
797 /* This is currently a bit weird. :-) */
798 uint8_t const cVariableRangeRegs = 0;
799 bool const fSystemManagementRangeRegisters = false;
800 bool const fFixedRangeRegisters = false;
801 bool const fWriteCombiningType = false;
802 *puValue = cVariableRangeRegs
803 | (fFixedRangeRegisters ? RT_BIT_64(8) : 0)
804 | (fWriteCombiningType ? RT_BIT_64(10) : 0)
805 | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
806 break;
807 }
808
809 case MSR_IA32_MTRR_DEF_TYPE:
810 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
811 break;
812
813 case IA32_MTRR_FIX64K_00000:
814 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000;
815 break;
816 case IA32_MTRR_FIX16K_80000:
817 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000;
818 break;
819 case IA32_MTRR_FIX16K_A0000:
820 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000;
821 break;
822 case IA32_MTRR_FIX4K_C0000:
823 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000;
824 break;
825 case IA32_MTRR_FIX4K_C8000:
826 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000;
827 break;
828 case IA32_MTRR_FIX4K_D0000:
829 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000;
830 break;
831 case IA32_MTRR_FIX4K_D8000:
832 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000;
833 break;
834 case IA32_MTRR_FIX4K_E0000:
835 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000;
836 break;
837 case IA32_MTRR_FIX4K_E8000:
838 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000;
839 break;
840 case IA32_MTRR_FIX4K_F0000:
841 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000;
842 break;
843 case IA32_MTRR_FIX4K_F8000:
844 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000;
845 break;
846
847 case MSR_K6_EFER:
848 *puValue = pVCpu->cpum.s.Guest.msrEFER;
849 break;
850
851 case MSR_K8_SF_MASK:
852 *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
853 break;
854
855 case MSR_K6_STAR:
856 *puValue = pVCpu->cpum.s.Guest.msrSTAR;
857 break;
858
859 case MSR_K8_LSTAR:
860 *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
861 break;
862
863 case MSR_K8_CSTAR:
864 *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
865 break;
866
867 case MSR_K8_FS_BASE:
868 *puValue = pVCpu->cpum.s.Guest.fsHid.u64Base;
869 break;
870
871 case MSR_K8_GS_BASE:
872 *puValue = pVCpu->cpum.s.Guest.gsHid.u64Base;
873 break;
874
875 case MSR_K8_KERNEL_GS_BASE:
876 *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
877 break;
878
879 case MSR_K8_TSC_AUX:
880 *puValue = pVCpu->cpum.s.GuestMsrs.msr.TscAux;
881 break;
882
883 case MSR_IA32_PERF_STATUS:
884 /** @todo this may not be exactly correct; maybe use the host's values. */
885 *puValue = UINT64_C(1000) /* TSC increment by tick */
886 | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
887 | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
888 break;
889
890 case MSR_IA32_FSB_CLOCK_STS:
891 /*
892 * Encoded as:
893 * 0 - 266
894 * 1 - 133
895 * 2 - 200
896 * 3 - 166
897 * 5 - 100
898 */
899 *puValue = (2 << 4);
900 break;
901
902 case MSR_IA32_PLATFORM_INFO:
903 *puValue = (u8Multiplier << 8) /* Flex ratio max */
904 | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
905 break;
906
907 case MSR_IA32_THERM_STATUS:
908 /* CPU temperature relative to TCC; to actually activate it, CPUID leaf 6 EAX[0] must be set. */
909 *puValue = RT_BIT(31) /* validity bit */
910 | (UINT64_C(20) << 16) /* degrees till TCC */;
911 break;
912
913 case MSR_IA32_MISC_ENABLE:
914#if 0
915 /* Needs to be tested more before enabling. */
916 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
917#else
918 /* Currently we don't allow guests to modify these enable MSRs. */
919 *puValue = MSR_IA32_MISC_ENABLE_FAST_STRINGS /* by default */;
920
921 if ((pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR) != 0)
922 *puValue |= MSR_IA32_MISC_ENABLE_MONITOR /* if mwait/monitor available */;
923
924 /** @todo add more cpuid-controlled features this way. */
925#endif
926 break;
927
928#if 0 /*def IN_RING0 */
929 case MSR_IA32_PLATFORM_ID:
930 case MSR_IA32_BIOS_SIGN_ID:
931 if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
932 {
933 /* Available since the P6 family. VT-x implies that this feature is present. */
934 if (idMsr == MSR_IA32_PLATFORM_ID)
935 *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
936 else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
937 *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
938 break;
939 }
940 /* no break */
941#endif
942
943 default:
944 /* In X2APIC specification this range is reserved for APIC control. */
945 if ( idMsr >= MSR_IA32_APIC_START
946 && idMsr < MSR_IA32_APIC_END)
947 {
948 rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
949 if (RT_SUCCESS(rc))
950 rc = VINF_SUCCESS;
951 else
952 {
953 *puValue = 0;
954 rc = VERR_CPUM_RAISE_GP_0;
955 }
956 }
957 else
958 {
959 *puValue = 0;
960 rc = VERR_CPUM_RAISE_GP_0;
961 }
962 break;
963 }
964
965 return rc;
966}
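/*
 * Illustrative, non-authoritative sketch of how a RDMSR emulation path might
 * consume CPUMQueryGuestMsr(): on VERR_CPUM_RAISE_GP_0 the caller raises
 * #GP(0) itself, otherwise the value goes into EDX:EAX. The helper names
 * (including the #GP injection placeholder) are made up.
 */
#if 0 /* sketch only, not part of the build */
static int hypotheticalEmulateRdMsr(PVMCPU pVCpu)
{
    uint32_t const idMsr = CPUMGetGuestECX(pVCpu);
    uint64_t       uValue;
    int rc = CPUMQueryGuestMsr(pVCpu, idMsr, &uValue);
    if (rc == VERR_CPUM_RAISE_GP_0)
        return hypotheticalInjectGp0(pVCpu);          /* placeholder, not a real API */
    CPUMSetGuestEAX(pVCpu, (uint32_t)uValue);         /* low half into EAX */
    CPUMSetGuestEDX(pVCpu, (uint32_t)(uValue >> 32)); /* high half into EDX */
    return rc;
}
#endif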
967
968
969/**
970 * Sets the MSR.
971 *
972 * The caller is responsible for checking privilege if the call is the result
973 * of a WRMSR instruction. We'll do the rest.
974 *
975 * @retval VINF_SUCCESS on success.
976 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
977 * appropriate actions.
978 *
979 * @param pVCpu The virtual CPU to operate on.
980 * @param idMsr The MSR id.
981 * @param uValue The value to set.
982 *
983 * @remarks Everyone changing MSR values, including the recompiler, shall do it
984 * by calling this method. This makes sure we have current values and
985 * that we trigger all the right actions when something changes.
986 */
987VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
988{
989 /*
990 * If we don't indicate MSR support in the CPUID feature bits, indicate
991 * that a #GP(0) should be raised.
992 */
993 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
994 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
995
996 int rc = VINF_SUCCESS;
997 switch (idMsr)
998 {
999 case MSR_IA32_MISC_ENABLE:
1000 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = uValue;
1001 break;
1002
1003 case MSR_IA32_TSC:
1004 TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
1005 break;
1006
1007 case MSR_IA32_APICBASE:
1008 rc = PDMApicSetBase(pVCpu->CTX_SUFF(pVM), uValue);
1009 if (rc != VINF_SUCCESS)
1010 rc = VERR_CPUM_RAISE_GP_0;
1011 break;
1012
1013 case MSR_IA32_CR_PAT:
1014 pVCpu->cpum.s.Guest.msrPAT = uValue;
1015 break;
1016
1017 case MSR_IA32_SYSENTER_CS:
1018 pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16-bit selector */
1019 break;
1020
1021 case MSR_IA32_SYSENTER_EIP:
1022 pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
1023 break;
1024
1025 case MSR_IA32_SYSENTER_ESP:
1026 pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
1027 break;
1028
1029 case MSR_IA32_MTRR_CAP:
1030 return VERR_CPUM_RAISE_GP_0;
1031
1032 case MSR_IA32_MTRR_DEF_TYPE:
1033 if ( (uValue & UINT64_C(0xfffffffffffff300))
1034 || ( (uValue & 0xff) != 0
1035 && (uValue & 0xff) != 1
1036 && (uValue & 0xff) != 4
1037 && (uValue & 0xff) != 5
1038 && (uValue & 0xff) != 6) )
1039 {
1040 Log(("MSR_IA32_MTRR_DEF_TYPE: #GP(0) - writing reserved value (%#llx)\n", uValue));
1041 return VERR_CPUM_RAISE_GP_0;
1042 }
1043 pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue;
1044 break;
1045
1046 case IA32_MTRR_FIX64K_00000:
1047 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000 = uValue;
1048 break;
1049 case IA32_MTRR_FIX16K_80000:
1050 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000 = uValue;
1051 break;
1052 case IA32_MTRR_FIX16K_A0000:
1053 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000 = uValue;
1054 break;
1055 case IA32_MTRR_FIX4K_C0000:
1056 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000 = uValue;
1057 break;
1058 case IA32_MTRR_FIX4K_C8000:
1059 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000 = uValue;
1060 break;
1061 case IA32_MTRR_FIX4K_D0000:
1062 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000 = uValue;
1063 break;
1064 case IA32_MTRR_FIX4K_D8000:
1065 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000 = uValue;
1066 break;
1067 case IA32_MTRR_FIX4K_E0000:
1068 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000 = uValue;
1069 break;
1070 case IA32_MTRR_FIX4K_E8000:
1071 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000 = uValue;
1072 break;
1073 case IA32_MTRR_FIX4K_F0000:
1074 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000 = uValue;
1075 break;
1076 case IA32_MTRR_FIX4K_F8000:
1077 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000 = uValue;
1078 break;
1079
1080 case MSR_K6_EFER:
1081 {
1082 PVM pVM = pVCpu->CTX_SUFF(pVM);
1083 uint64_t const uOldEFER = pVCpu->cpum.s.Guest.msrEFER;
1084 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1085 ? pVM->cpum.s.aGuestCpuIdExt[1].edx
1086 : 0;
1087 uint64_t fMask = 0;
1088
1089 /* Collect the bits the guest is allowed to change; the rest (e.g. LMA) are read-only. */
1090 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_NX)
1091 fMask |= MSR_K6_EFER_NXE;
1092 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1093 fMask |= MSR_K6_EFER_LME;
1094 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_SEP)
1095 fMask |= MSR_K6_EFER_SCE;
1096 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
1097 fMask |= MSR_K6_EFER_FFXSR;
1098
1099 /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
1100 paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
1101 if ( (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
1102 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
1103 {
1104 Log(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
1105 return VERR_CPUM_RAISE_GP_0;
1106 }
1107
1108 /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
1109 AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
1110 ("Unexpected value %RX64\n", uValue));
1111 pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);
1112
1113 /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
1114 if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
1115 if ( (uOldEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
1116 != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
1117 {
1118 /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
1119 HWACCMFlushTLB(pVCpu);
1120
1121 /* Notify PGM about NXE changes. */
1122 if ( (uOldEFER & MSR_K6_EFER_NXE)
1123 != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
1124 PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE));
1125 }
1126 break;
1127 }
1128
1129 case MSR_K8_SF_MASK:
1130 pVCpu->cpum.s.Guest.msrSFMASK = uValue;
1131 break;
1132
1133 case MSR_K6_STAR:
1134 pVCpu->cpum.s.Guest.msrSTAR = uValue;
1135 break;
1136
1137 case MSR_K8_LSTAR:
1138 pVCpu->cpum.s.Guest.msrLSTAR = uValue;
1139 break;
1140
1141 case MSR_K8_CSTAR:
1142 pVCpu->cpum.s.Guest.msrCSTAR = uValue;
1143 break;
1144
1145 case MSR_K8_FS_BASE:
1146 pVCpu->cpum.s.Guest.fsHid.u64Base = uValue;
1147 break;
1148
1149 case MSR_K8_GS_BASE:
1150 pVCpu->cpum.s.Guest.gsHid.u64Base = uValue;
1151 break;
1152
1153 case MSR_K8_KERNEL_GS_BASE:
1154 pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
1155 break;
1156
1157 case MSR_K8_TSC_AUX:
1158 pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
1159 break;
1160
1161 default:
1162 /* In X2APIC specification this range is reserved for APIC control. */
1163 if ( idMsr >= MSR_IA32_APIC_START
1164 && idMsr < MSR_IA32_APIC_END)
1165 {
1166 rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
1167 if (rc != VINF_SUCCESS)
1168 rc = VERR_CPUM_RAISE_GP_0;
1169 }
1170 else
1171 {
1172 /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
1173 /** @todo rc = VERR_CPUM_RAISE_GP_0 */
1174 Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
1175 }
1176 break;
1177 }
1178 return rc;
1179}
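/*
 * Illustrative, non-authoritative sketch of the matching WRMSR side: EDX:EAX
 * is combined and handed to CPUMSetGuestMsr(), which performs the filtering
 * and side effects shown above (EFER masking, TLB flushing, APIC range). The
 * helper names are made up.
 */
#if 0 /* sketch only, not part of the build */
static int hypotheticalEmulateWrMsr(PVMCPU pVCpu)
{
    uint32_t const idMsr  = CPUMGetGuestECX(pVCpu);
    uint64_t const uValue = RT_MAKE_U64(CPUMGetGuestEAX(pVCpu), CPUMGetGuestEDX(pVCpu));
    int rc = CPUMSetGuestMsr(pVCpu, idMsr, uValue);
    if (rc == VERR_CPUM_RAISE_GP_0)
        return hypotheticalInjectGp0(pVCpu);          /* placeholder, not a real API */
    return rc;
}
#endif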
1180
1181
1182VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
1183{
1184 if (pcbLimit)
1185 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
1186 return pVCpu->cpum.s.Guest.idtr.pIdt;
1187}
1188
1189
1190VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
1191{
1192 if (pHidden)
1193 *pHidden = pVCpu->cpum.s.Guest.trHid;
1194 return pVCpu->cpum.s.Guest.tr;
1195}
1196
1197
1198VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
1199{
1200 return pVCpu->cpum.s.Guest.cs;
1201}
1202
1203
1204VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
1205{
1206 return pVCpu->cpum.s.Guest.ds;
1207}
1208
1209
1210VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
1211{
1212 return pVCpu->cpum.s.Guest.es;
1213}
1214
1215
1216VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
1217{
1218 return pVCpu->cpum.s.Guest.fs;
1219}
1220
1221
1222VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
1223{
1224 return pVCpu->cpum.s.Guest.gs;
1225}
1226
1227
1228VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
1229{
1230 return pVCpu->cpum.s.Guest.ss;
1231}
1232
1233
1234VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
1235{
1236 return pVCpu->cpum.s.Guest.ldtr;
1237}
1238
1239
1240VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
1241{
1242 return pVCpu->cpum.s.Guest.cr0;
1243}
1244
1245
1246VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
1247{
1248 return pVCpu->cpum.s.Guest.cr2;
1249}
1250
1251
1252VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
1253{
1254 return pVCpu->cpum.s.Guest.cr3;
1255}
1256
1257
1258VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
1259{
1260 return pVCpu->cpum.s.Guest.cr4;
1261}
1262
1263
1264VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
1265{
1266 uint64_t u64;
1267 int rc = CPUMGetGuestCRx(pVCpu, USE_REG_CR8, &u64);
1268 if (RT_FAILURE(rc))
1269 u64 = 0;
1270 return u64;
1271}
1272
1273
1274VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
1275{
1276 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1277}
1278
1279
1280VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
1281{
1282 return pVCpu->cpum.s.Guest.eip;
1283}
1284
1285
1286VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1287{
1288 return pVCpu->cpum.s.Guest.rip;
1289}
1290
1291
1292VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1293{
1294 return pVCpu->cpum.s.Guest.eax;
1295}
1296
1297
1298VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1299{
1300 return pVCpu->cpum.s.Guest.ebx;
1301}
1302
1303
1304VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1305{
1306 return pVCpu->cpum.s.Guest.ecx;
1307}
1308
1309
1310VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1311{
1312 return pVCpu->cpum.s.Guest.edx;
1313}
1314
1315
1316VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1317{
1318 return pVCpu->cpum.s.Guest.esi;
1319}
1320
1321
1322VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1323{
1324 return pVCpu->cpum.s.Guest.edi;
1325}
1326
1327
1328VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1329{
1330 return pVCpu->cpum.s.Guest.esp;
1331}
1332
1333
1334VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1335{
1336 return pVCpu->cpum.s.Guest.ebp;
1337}
1338
1339
1340VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1341{
1342 return pVCpu->cpum.s.Guest.eflags.u32;
1343}
1344
1345
1346VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1347{
1348 switch (iReg)
1349 {
1350 case USE_REG_CR0:
1351 *pValue = pVCpu->cpum.s.Guest.cr0;
1352 break;
1353
1354 case USE_REG_CR2:
1355 *pValue = pVCpu->cpum.s.Guest.cr2;
1356 break;
1357
1358 case USE_REG_CR3:
1359 *pValue = pVCpu->cpum.s.Guest.cr3;
1360 break;
1361
1362 case USE_REG_CR4:
1363 *pValue = pVCpu->cpum.s.Guest.cr4;
1364 break;
1365
1366 case USE_REG_CR8:
1367 {
1368 uint8_t u8Tpr;
1369 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /*pfPending*/);
1370 if (RT_FAILURE(rc))
1371 {
1372 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1373 *pValue = 0;
1374 return rc;
1375 }
1376 *pValue = u8Tpr >> 4; /* bits 7-4 contain the task priority that goes into cr8; bits 3-0 are not visible there. */
1377 break;
1378 }
1379
1380 default:
1381 return VERR_INVALID_PARAMETER;
1382 }
1383 return VINF_SUCCESS;
1384}
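/*
 * Illustrative, non-authoritative sketch of the CR8/TPR mapping handled in
 * the USE_REG_CR8 case above: only the priority class in TPR bits 7:4 is
 * visible through CR8, so an APIC TPR of 0x50 reads back as CR8 = 0x5. The
 * helper name is made up.
 */
#if 0 /* sketch only, not part of the build */
static void hypotheticalShowCr8Mapping(PVMCPU pVCpu)
{
    uint64_t uCr8 = 0;
    if (RT_SUCCESS(CPUMGetGuestCRx(pVCpu, USE_REG_CR8, &uCr8)))
        Log(("Guest CR8 (TPR priority class) = %#llx\n", uCr8));
}
#endif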
1385
1386
1387VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1388{
1389 return pVCpu->cpum.s.Guest.dr[0];
1390}
1391
1392
1393VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1394{
1395 return pVCpu->cpum.s.Guest.dr[1];
1396}
1397
1398
1399VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1400{
1401 return pVCpu->cpum.s.Guest.dr[2];
1402}
1403
1404
1405VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1406{
1407 return pVCpu->cpum.s.Guest.dr[3];
1408}
1409
1410
1411VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1412{
1413 return pVCpu->cpum.s.Guest.dr[6];
1414}
1415
1416
1417VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1418{
1419 return pVCpu->cpum.s.Guest.dr[7];
1420}
1421
1422
1423VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1424{
1425 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1426 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1427 if (iReg == 4 || iReg == 5)
1428 iReg += 2;
1429 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1430 return VINF_SUCCESS;
1431}
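/*
 * Illustrative, non-authoritative sketch of the DR4/DR5 aliasing handled
 * above: asking for register index 5 yields the DR7 value, mirroring the
 * architectural aliasing when CR4.DE is clear. The helper name is made up.
 */
#if 0 /* sketch only, not part of the build */
static void hypotheticalShowDrAliasing(PVMCPU pVCpu)
{
    uint64_t uDr5 = 0;
    int rc = CPUMGetGuestDRx(pVCpu, 5 /* alias of DR7 */, &uDr5);
    AssertRC(rc);
    Assert(uDr5 == CPUMGetGuestDR7(pVCpu));
}
#endif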
1432
1433
1434VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1435{
1436 return pVCpu->cpum.s.Guest.msrEFER;
1437}
1438
1439
1440/**
1441 * Gets a CpuId leaf.
1442 *
1443 * @param pVCpu The VMCPU handle.
1444 * @param iLeaf The CPUID leaf to get.
1445 * @param pEax Where to store the EAX value.
1446 * @param pEbx Where to store the EBX value.
1447 * @param pEcx Where to store the ECX value.
1448 * @param pEdx Where to store the EDX value.
1449 */
1450VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1451{
1452 PVM pVM = pVCpu->CTX_SUFF(pVM);
1453
1454 PCCPUMCPUID pCpuId;
1455 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1456 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1457 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1458 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1459 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1460 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1461 else
1462 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1463
1464 uint32_t cCurrentCacheIndex = *pEcx;
1465
1466 *pEax = pCpuId->eax;
1467 *pEbx = pCpuId->ebx;
1468 *pEcx = pCpuId->ecx;
1469 *pEdx = pCpuId->edx;
1470
1471 if ( iLeaf == 1)
1472 {
1473 /* Bits 31-24: Initial APIC ID */
1474 Assert(pVCpu->idCpu <= 255);
1475 *pEbx |= (pVCpu->idCpu << 24);
1476 }
1477
1478 if ( iLeaf == 4
1479 && cCurrentCacheIndex < 3
1480 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1481 {
1482 uint32_t type, level, sharing, linesize,
1483 partitions, associativity, sets, cores;
1484
1485 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1486 partitions = 1;
1487 /* These assignments only keep the compiler quiet; they are always
1488 overwritten below, even though the compiler cannot always see that. */
1489 sets = associativity = sharing = level = 1;
1490 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1491 switch (cCurrentCacheIndex)
1492 {
1493 case 0:
1494 type = 1;
1495 level = 1;
1496 sharing = 1;
1497 linesize = 64;
1498 associativity = 8;
1499 sets = 64;
1500 break;
1501 case 1:
1502 level = 1;
1503 type = 2;
1504 sharing = 1;
1505 linesize = 64;
1506 associativity = 8;
1507 sets = 64;
1508 break;
1509 default: /* shut up gcc.*/
1510 AssertFailed();
1511 case 2:
1512 level = 2;
1513 type = 3;
1514 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1515 linesize = 64;
1516 associativity = 24;
1517 sets = 4096;
1518 break;
1519 }
1520
1521 *pEax |= ((cores - 1) << 26) |
1522 ((sharing - 1) << 14) |
1523 (level << 5) |
1524 1;
1525 *pEbx = (linesize - 1) |
1526 ((partitions - 1) << 12) |
1527 ((associativity - 1) << 22); /* -1 encoding */
1528 *pEcx = sets - 1;
1529 }
1530
1531 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1532}
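/*
 * Illustrative, non-authoritative sketch of decoding the synthetic Intel
 * leaf 4 values built above, using the architectural minus-one encodings in
 * EBX/ECX. For the L2 entry above this gives 64 * 1 * 24 * 4096 = 6 MB. The
 * helper name is made up.
 */
#if 0 /* sketch only, not part of the build */
static uint32_t hypotheticalCacheSizeFromLeaf4(PVMCPU pVCpu, uint32_t iCacheIndex)
{
    uint32_t uEax, uEbx, uEcx, uEdx;
    uEcx = iCacheIndex;                 /* ECX carries the cache index on input */
    CPUMGetGuestCpuId(pVCpu, 4, &uEax, &uEbx, &uEcx, &uEdx);
    uint32_t const cbLine      = ( uEbx        & 0xfff) + 1;
    uint32_t const cPartitions = ((uEbx >> 12) & 0x3ff) + 1;
    uint32_t const cWays       = ((uEbx >> 22) & 0x3ff) + 1;
    uint32_t const cSets       =   uEcx                 + 1;
    return cbLine * cPartitions * cWays * cSets;
}
#endif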
1533
1534/**
1535 * Gets the number of standard CPUID leaves.
1536 *
1537 * @returns Number of leaves.
1538 * @param pVM The VM handle.
1539 * @remark Intended for PATM.
1540 */
1541VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1542{
1543 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1544}
1545
1546
1547/**
1548 * Gets the number of extended CPUID leaves.
1549 *
1550 * @returns Number of leaves.
1551 * @param pVM The VM handle.
1552 * @remark Intended for PATM.
1553 */
1554VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1555{
1556 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1557}
1558
1559
1560/**
1561 * Gets the number of Centaur CPUID leaves.
1562 *
1563 * @returns Number of leaves.
1564 * @param pVM The VM handle.
1565 * @remark Intended for PATM.
1566 */
1567VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1568{
1569 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1570}
1571
1572
1573/**
1574 * Sets a CPUID feature bit.
1575 *
1576 * @param pVM The VM Handle.
1577 * @param enmFeature The feature to set.
1578 */
1579VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1580{
1581 switch (enmFeature)
1582 {
1583 /*
1584 * Set the APIC bit in both feature masks.
1585 */
1586 case CPUMCPUIDFEATURE_APIC:
1587 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1588 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1589 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1590 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1591 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1592 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1593 break;
1594
1595 /*
1596 * Set the x2APIC bit in the standard feature mask.
1597 */
1598 case CPUMCPUIDFEATURE_X2APIC:
1599 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1600 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1601 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1602 break;
1603
1604 /*
1605 * Set the sysenter/sysexit bit in the standard feature mask.
1606 * Assumes the caller knows what it's doing! (host must support these)
1607 */
1608 case CPUMCPUIDFEATURE_SEP:
1609 {
1610 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1611 {
1612 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1613 return;
1614 }
1615
1616 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1617 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1618 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1619 break;
1620 }
1621
1622 /*
1623 * Set the syscall/sysret bit in the extended feature mask.
1624 * Assumes the caller knows what it's doing! (host must support these)
1625 */
1626 case CPUMCPUIDFEATURE_SYSCALL:
1627 {
1628 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1629 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1630 {
1631#if HC_ARCH_BITS == 32
1632 /* X86_CPUID_AMD_FEATURE_EDX_SEP does not seem to be set in 32-bit mode,
1633 * even when the CPU is capable of it in 64-bit mode.
1634 */
1635 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1636 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1637 || !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1638#endif
1639 {
1640 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1641 return;
1642 }
1643 }
1644 /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
1645 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1646 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1647 break;
1648 }
1649
1650 /*
1651 * Set the PAE bit in both feature masks.
1652 * Assumes the caller knows what it's doing! (host must support these)
1653 */
1654 case CPUMCPUIDFEATURE_PAE:
1655 {
1656 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1657 {
1658 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1659 return;
1660 }
1661
1662 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1663 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1664 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1665 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1666 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1667 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1668 break;
1669 }
1670
1671 /*
1672 * Set the LONG MODE bit in the extended feature mask.
1673 * Assumes the caller knows what it's doing! (host must support these)
1674 */
1675 case CPUMCPUIDFEATURE_LONG_MODE:
1676 {
1677 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1678 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1679 {
1680 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1681 return;
1682 }
1683
1684 /* Valid for both Intel and AMD. */
1685 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1686 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1687 break;
1688 }
1689
1690 /*
1691 * Set the NXE bit in the extended feature mask.
1692 * Assumes the caller knows what it's doing! (host must support these)
1693 */
1694 case CPUMCPUIDFEATURE_NXE:
1695 {
1696 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1697 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1698 {
1699 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1700 return;
1701 }
1702
1703 /* Valid for both Intel and AMD. */
1704 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1705 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1706 break;
1707 }
1708
1709 case CPUMCPUIDFEATURE_LAHF:
1710 {
1711 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1712 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1713 {
1714 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1715 return;
1716 }
1717
1718 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1719 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1720 break;
1721 }
1722
1723 case CPUMCPUIDFEATURE_PAT:
1724 {
1725 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1726 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1727 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1728 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1729 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1730 LogRel(("CPUMClearGuestCpuIdFeature: Enabled PAT\n"));
1731 break;
1732 }
1733
1734 case CPUMCPUIDFEATURE_RDTSCP:
1735 {
1736 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1737 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP)
1738 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1739 {
1740 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1741 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1742 return;
1743 }
1744
1745 /* Valid for AMD only (for now). */
1746 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
1747 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1748 break;
1749 }
1750
1751 /*
1752 * Set the Hypervisor Present bit in the standard feature mask.
1753 */
1754 case CPUMCPUIDFEATURE_HVP:
1755 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1756 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP;
1757 LogRel(("CPUMSetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
1758 break;
1759
1760 default:
1761 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1762 break;
1763 }
1764 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1765 {
1766 PVMCPU pVCpu = &pVM->aCpus[i];
1767 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1768 }
1769}
1770
1771
1772/**
1773 * Queries a CPUID feature bit.
1774 *
1775 * @returns boolean for feature presence
1776 * @param pVM The VM Handle.
1777 * @param enmFeature The feature to query.
1778 */
1779VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1780{
1781 switch (enmFeature)
1782 {
1783 case CPUMCPUIDFEATURE_PAE:
1784 {
1785 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1786 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1787 break;
1788 }
1789
1790 case CPUMCPUIDFEATURE_NXE:
1791 {
1792 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1793 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_NX);
1794 break;
 }
1795
1796 case CPUMCPUIDFEATURE_RDTSCP:
1797 {
1798 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1799 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
1800 break;
1801 }
1802
1803 case CPUMCPUIDFEATURE_LONG_MODE:
1804 {
1805 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1806 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
1807 break;
1808 }
1809
1810 default:
1811 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1812 break;
1813 }
1814 return false;
1815}
1816
1817
1818/**
1819 * Clears a CPUID feature bit.
1820 *
1821 * @param pVM The VM Handle.
1822 * @param enmFeature The feature to clear.
1823 */
1824VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1825{
1826 switch (enmFeature)
1827 {
1828 /*
1829 * Clear the APIC bit in both feature masks.
1830 */
1831 case CPUMCPUIDFEATURE_APIC:
1832 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1833 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1834 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1835 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1836 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1837 Log(("CPUMSetGuestCpuIdFeature: Disabled APIC\n"));
1838 break;
1839
1840 /*
1841 * Clear the x2APIC bit in the standard feature mask.
1842 */
1843 case CPUMCPUIDFEATURE_X2APIC:
1844 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1845 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1846 LogRel(("CPUMSetGuestCpuIdFeature: Disabled x2APIC\n"));
1847 break;
1848
1849 case CPUMCPUIDFEATURE_PAE:
1850 {
1851 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1852 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1853 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1854 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1855 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1856 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1857 break;
1858 }
1859
1860 case CPUMCPUIDFEATURE_PAT:
1861 {
1862 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1863 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1864 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1865 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1866 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1867 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1868 break;
1869 }
1870
1871 case CPUMCPUIDFEATURE_LONG_MODE:
1872 {
1873 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1874 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1875 break;
1876 }
1877
1878 case CPUMCPUIDFEATURE_LAHF:
1879 {
1880 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1881 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1882 break;
1883 }
1884
1885 case CPUMCPUIDFEATURE_HVP:
1886 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1887 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP;
1888 break;
1889
1890 default:
1891 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1892 break;
1893 }
1894 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1895 {
1896 PVMCPU pVCpu = &pVM->aCpus[i];
1897 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1898 }
1899}
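/*
 * Illustrative, non-authoritative sketch of how the CPUID feature editors
 * above pair up during VM configuration: expose a bit only when wanted (the
 * setters silently refuse bits the host cannot back), and every VCPU then
 * gets CPUM_CHANGED_CPUID so consumers resync. The helper name and flag are
 * made up.
 */
#if 0 /* sketch only, not part of the build */
static void hypotheticalConfigureGuestPae(PVM pVM, bool fHypotheticalEnablePae)
{
    if (fHypotheticalEnablePae)
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);    /* no-op if the host lacks PAE */
    else
        CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    Log(("Guest PAE exposed: %RTbool\n", CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)));
}
#endif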
1900
1901
1902/**
1903 * Gets the host CPU vendor
1904 *
1905 * @returns CPU vendor
1906 * @param pVM The VM handle.
1907 */
1908VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1909{
1910 return pVM->cpum.s.enmHostCpuVendor;
1911}
1912
1913/**
1914 * Gets the guest CPU vendor
1915 *
1916 * @returns CPU vendor
1917 * @param pVM The VM handle.
1918 */
1919VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1920{
1921 return pVM->cpum.s.enmGuestCpuVendor;
1922}
1923
1924
1925VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1926{
1927 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1928 return CPUMRecalcHyperDRx(pVCpu);
1929}
1930
1931
1932VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1933{
1934 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1935 return CPUMRecalcHyperDRx(pVCpu);
1936}
1937
1938
1939VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1940{
1941 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1942 return CPUMRecalcHyperDRx(pVCpu);
1943}
1944
1945
1946VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1947{
1948 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1949 return CPUMRecalcHyperDRx(pVCpu);
1950}
1951
1952
1953VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1954{
1955 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1956 return CPUMRecalcHyperDRx(pVCpu);
1957}
1958
1959
1960VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1961{
1962 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1963 return CPUMRecalcHyperDRx(pVCpu);
1964}
1965
1966
1967VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1968{
1969 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1970 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1971 if (iReg == 4 || iReg == 5)
1972 iReg += 2;
1973 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1974 return CPUMRecalcHyperDRx(pVCpu);
1975}
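/*
 * Illustrative, non-authoritative sketch of a MOV-to-DRx write path: the
 * setter above stores the value (resolving the DR4/DR5 aliases) and then
 * calls CPUMRecalcHyperDRx(), defined below, so the hypervisor DRx view
 * follows the guest. The helper name is made up.
 */
#if 0 /* sketch only, not part of the build */
static int hypotheticalEmulateMovToDr(PVMCPU pVCpu, uint32_t iDrReg, uint64_t uValue)
{
    /* A write to DR5 ends up in DR7 here (CR4.DE clear aliasing). */
    return CPUMSetGuestDRx(pVCpu, iDrReg, uValue);
}
#endif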
1976
1977
1978/**
1979 * Recalculates the hypervisor DRx register values based on
1980 * current guest registers and DBGF breakpoints.
1981 *
1982 * This is called whenever a guest DRx register is modified and when DBGF
1983 * sets a hardware breakpoint. In guest context this function will reload
1984 * any (hyper) DRx registers which comes out with a different value.
1985 *
1986 * @returns VINF_SUCCESS.
1987 * @param pVCpu The VMCPU handle.
1988 */
1989VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
1990{
1991 PVM pVM = pVCpu->CTX_SUFF(pVM);
1992
1993 /*
1994 * Compare the DR7s first.
1995 *
1996 * We only care about the enabled flags. The GE and LE flags are always
1997 * set and we don't care if the guest doesn't set them. GD is virtualized
1998 * when we dispatch #DB; we never enable it.
1999 */
2000 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
2001#ifdef CPUM_VIRTUALIZE_DRX
2002 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
2003#else
2004 const RTGCUINTREG uGstDr7 = 0;
2005#endif
2006 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
2007 {
2008 /*
2009 * Ok, something is enabled. Recalc each of the breakpoints.
2010 * Straightforward code, not optimized/minimized in any way.
2011 */
2012 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
2013
2014 /* bp 0 */
2015 RTGCUINTREG uNewDr0;
2016 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
2017 {
2018 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2019 uNewDr0 = DBGFBpGetDR0(pVM);
2020 }
2021 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
2022 {
2023 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2024 uNewDr0 = CPUMGetGuestDR0(pVCpu);
2025 }
2026 else
2027 uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
2028
2029 /* bp 1 */
2030 RTGCUINTREG uNewDr1;
2031 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
2032 {
2033 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2034 uNewDr1 = DBGFBpGetDR1(pVM);
2035 }
2036 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
2037 {
2038 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2039 uNewDr1 = CPUMGetGuestDR1(pVCpu);
2040 }
2041 else
2042 uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
2043
2044 /* bp 2 */
2045 RTGCUINTREG uNewDr2;
2046 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
2047 {
2048 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2049 uNewDr2 = DBGFBpGetDR2(pVM);
2050 }
2051 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
2052 {
2053 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2054 uNewDr2 = CPUMGetGuestDR2(pVCpu);
2055 }
2056 else
2057 uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
2058
2059 /* bp 3 */
2060 RTGCUINTREG uNewDr3;
2061 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2062 {
2063 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2064 uNewDr3 = DBGFBpGetDR3(pVM);
2065 }
2066 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2067 {
2068 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2069 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2070 }
2071 else
2072 uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
2073
2074 /*
2075 * Apply the updates.
2076 */
2077#ifdef IN_RC
2078 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
2079 {
2080 /** @todo save host DBx registers. */
2081 }
2082#endif
2083 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
2084 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2085 CPUMSetHyperDR3(pVCpu, uNewDr3);
2086 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2087 CPUMSetHyperDR2(pVCpu, uNewDr2);
2088 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2089 CPUMSetHyperDR1(pVCpu, uNewDr1);
2090 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2091 CPUMSetHyperDR0(pVCpu, uNewDr0);
2092 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2093 CPUMSetHyperDR7(pVCpu, uNewDr7);
2094 }
2095 else
2096 {
2097#ifdef IN_RC
2098 if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
2099 {
2100 /** @todo restore host DBx registers. */
2101 }
2102#endif
2103 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2104 }
2105 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2106 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2107 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2108 pVCpu->cpum.s.Hyper.dr[7]));
2109
2110 return VINF_SUCCESS;
2111}
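/*
 * Illustrative sketch (hypothetical helper, not part of the CPUM API): the two
 * paths that end up in CPUMRecalcHyperDRx are a guest DRx write and a DBGF
 * hardware breakpoint.  Both setters used below are real and trigger the
 * recalculation themselves.
 */
#if 0 /* illustration only */
static void exampleArmGuestBreakpoint(PVMCPU pVCpu, uint64_t uAddr, uint64_t uDr7)
{
    CPUMSetGuestDRx(pVCpu, 0 /* DR0 */, uAddr);  /* breakpoint address        */
    CPUMSetGuestDR7(pVCpu, uDr7);                /* enable bits, type, length */
}
#endif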
2112
2113
2114/**
2115 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2116 *
2117 * @returns true if NXE is enabled, otherwise false.
2118 * @param pVCpu The virtual CPU handle.
2119 */
2120VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2121{
2122 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2123}
2124
2125
2126/**
2127 * Tests if the guest has the Page Size Extension enabled (PSE).
2128 *
2129 * @returns true if PSE is enabled, otherwise false.
2130 * @param pVCpu The virtual CPU handle.
2131 */
2132VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2133{
2134 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2135 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2136}
2137
2138
2139/**
2140 * Tests if the guest has paging enabled (PG).
2141 *
2142 * @returns true if paging is enabled, otherwise false.
2143 * @param pVCpu The virtual CPU handle.
2144 */
2145VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2146{
2147 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2148}
2149
2150
2151/**
2152 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
2153 *
2154 * @returns true if write protection is enabled, otherwise false.
2155 * @param pVCpu The virtual CPU handle.
2156 */
2157VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2158{
2159 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2160}
2161
2162
2163/**
2164 * Tests if the guest is running in real mode or not.
2165 *
2166 * @returns true if in real mode, otherwise false.
2167 * @param pVCpu The virtual CPU handle.
2168 */
2169VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2170{
2171 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2172}
2173
2174
2175/**
2176 * Tests if the guest is running in real or virtual 8086 mode.
2177 *
2178 * @returns @c true if it is, @c false if not.
2179 * @param pVCpu The virtual CPU handle.
2180 */
2181VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2182{
2183 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2184 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2185}
2186
2187
2188/**
2189 * Tests if the guest is running in protected mode or not.
2190 *
2191 * @returns true if in protected mode, otherwise false.
2192 * @param pVCpu The virtual CPU handle.
2193 */
2194VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2195{
2196 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2197}
2198
2199
2200/**
2201 * Tests if the guest is running in paged protected mode or not.
2202 *
2203 * @returns true if in paged protected mode, otherwise false.
2204 * @param pVCpu The virtual CPU handle.
2205 */
2206VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2207{
2208 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2209}
2210
2211
2212/**
2213 * Tests if the guest is running in long mode or not.
2214 *
2215 * @returns true if in long mode, otherwise false.
2216 * @param pVCpu The virtual CPU handle.
2217 */
2218VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2219{
2220 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2221}
2222
2223
2224/**
2225 * Tests if the guest is running in PAE mode or not.
2226 *
2227 * @returns true if in PAE mode, otherwise false.
2228 * @param pVCpu The virtual CPU handle.
2229 */
2230VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2231{
2232 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2233 && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
2234 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2235}
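/*
 * Illustrative sketch (hypothetical helper): composing the mode predicates
 * above into a human-readable paging-mode name, e.g. for logging.
 */
#if 0 /* illustration only */
static const char *examplePagingModeName(PVMCPU pVCpu)
{
    if (CPUMIsGuestInRealMode(pVCpu))
        return "real";
    if (!CPUMIsGuestPagingEnabled(pVCpu))
        return "protected, paging disabled";
    if (CPUMIsGuestInLongMode(pVCpu))
        return "long";
    if (CPUMIsGuestInPAEMode(pVCpu))
        return "PAE";
    return "32-bit";
}
#endif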
2236
2237
2238#ifndef IN_RING0
2239/**
2240 * Updates the EFLAGS while we're in raw-mode.
2241 *
2242 * @param pVCpu The VMCPU handle.
2243 * @param pCtxCore The context core.
2244 * @param eflags The new EFLAGS value.
2245 */
2246VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t eflags)
2247{
2248 PVM pVM = pVCpu->CTX_SUFF(pVM);
2249
2250 if (!pVCpu->cpum.s.fRawEntered)
2251 {
2252 pCtxCore->eflags.u32 = eflags;
2253 return;
2254 }
2255 PATMRawSetEFlags(pVM, pCtxCore, eflags);
2256}
2257#endif /* !IN_RING0 */
2258
2259
2260/**
2261 * Gets the EFLAGS while we're in raw-mode.
2262 *
2263 * @returns The eflags.
2264 * @param pVCpu The VMCPU handle.
2265 * @param pCtxCore The context core.
2266 */
2267VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2268{
2269#ifdef IN_RING0
2270 NOREF(pVCpu);
2271 return pCtxCore->eflags.u32;
2272#else
2273 PVM pVM = pVCpu->CTX_SUFF(pVM);
2274
2275 if (!pVCpu->cpum.s.fRawEntered)
2276 return pCtxCore->eflags.u32;
2277 return PATMRawGetEFlags(pVM, pCtxCore);
2278#endif
2279}
2280
2281
2282/**
2283 * Sets the specified changed flags (CPUM_CHANGED_*).
2284 *
2285 * @param pVCpu The VMCPU handle.
 * @param fChangedFlags The changed flags to OR in (CPUM_CHANGED_*).
2286 */
2287VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2288{
2289 pVCpu->cpum.s.fChanged |= fChangedFlags;
2290}
2291
2292
2293/**
2294 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
2295 * @returns true if supported.
2296 * @returns false if not supported.
2297 * @param pVM The VM handle.
2298 */
2299VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2300{
2301 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2302}
2303
2304
2305/**
2306 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2307 * @returns true if used.
2308 * @returns false if not used.
2309 * @param pVM The VM handle.
2310 */
2311VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2312{
2313 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
2314}
2315
2316
2317/**
2318 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2319 * @returns true if used.
2320 * @returns false if not used.
2321 * @param pVM The VM handle.
2322 */
2323VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2324{
2325 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
2326}
2327
2328#ifndef IN_RING3
2329
2330/**
2331 * Lazily syncs in the FPU/XMM state.
2332 *
2333 * @returns VBox status code.
2334 * @param pVCpu The VMCPU handle.
2335 */
2336VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2337{
2338 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2339}
2340
2341#endif /* !IN_RING3 */
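/*
 * Illustrative sketch (hypothetical handler, assuming a #NM-style trap): the
 * lazy FPU loading above is typically driven by the first guest FPU/SSE
 * instruction trapping; the real trap handlers live elsewhere in the VMM.
 */
#if 0 /* illustration only */
static int exampleHandleDeviceNotAvailable(PVMCPU pVCpu)
{
    if (!CPUMIsGuestFPUStateActive(pVCpu))
        return CPUMHandleLazyFPU(pVCpu); /* load the guest FPU/XMM state now */
    return VINF_SUCCESS;                 /* already loaded, nothing to do    */
}
#endif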
2342
2343/**
2344 * Checks if we activated the FPU/XMM state of the guest OS
2345 * @returns true if we did.
2346 * @returns false if not.
2347 * @param pVCpu The VMCPU handle.
2348 */
2349VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2350{
2351 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2352}
2353
2354
2355/**
2356 * Deactivate the FPU/XMM state of the guest OS
2357 * @param pVCpu The VMCPU handle.
2358 */
2359VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2360{
2361 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2362}
2363
2364
2365/**
2366 * Checks if the guest debug state is active
2367 *
2368 * @returns true if active, false if not.
2369 * @param pVCpu The VMCPU handle.
2370 */
2371VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2372{
2373 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2374}
2375
2376/**
2377 * Checks if the hyper debug state is active
2378 *
2379 * @returns true if active, false if not.
2380 * @param pVCpu The VMCPU handle.
2381 */
2382VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2383{
2384 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
2385}
2386
2387
2388/**
2389 * Mark the guest's debug state as inactive.
2390 *
2391 * @param pVCpu The VMCPU handle.
2393 */
2394VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2395{
2396 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2397}
2398
2399
2400/**
2401 * Mark the hypervisor's debug state as inactive.
2402 *
2403 * @param pVCpu The VMCPU handle.
2405 */
2406VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
2407{
2408 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2409}
2410
2411/**
2412 * Checks if the hidden selector registers are valid for the specified CPU.
2413 *
2414 * @returns true if they are.
2415 * @returns false if not.
2416 * @param pVCpu The VMCPU handle.
2417 */
2418VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVMCPU pVCpu)
2419{
2420 bool const fRc = !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID);
2421 Assert(fRc || !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)));
2422 Assert(!pVCpu->cpum.s.fRemEntered);
2423 return fRc;
2424}
2425
2426
2427
2428/**
2429 * Get the current privilege level of the guest.
2430 *
2431 * @returns The current privilege level (CPL).
2432 * @param pVCpu The VMCPU handle.
2433 * @param pCtxCore The context core (used for SS and EFLAGS).
2434 */
2435VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2436{
2437 uint32_t cpl;
2438
2439 if (CPUMAreHiddenSelRegsValid(pVCpu))
2440 {
2441 /*
2442 * The hidden CS.DPL register is always equal to the CPL; it is
2443 * not affected by loading a conforming code segment.
2444 *
2445 * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
2446 * at SS. (ACP2 regression during install after a far call to ring 2)
2447 *
2448 * It seems this isn't necessarily true for newer AMD-V CPUs either; we have
2449 * to move the VMCB.guest.u8CPL into Attr.n.u2Dpl to make this (and
2450 * other) code work right. So, forget CS.DPL; always use SS.DPL.
2451 */
2452 if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2453 {
2454 if (!pCtxCore->eflags.Bits.u1VM)
2455 cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
2456 else
2457 cpl = 3; /* REM doesn't set DPL=3 in V8086 mode. See #5130. */
2458 }
2459 else
2460 cpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2461 }
2462 else if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2463 {
2464 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
2465 {
2466 /*
2467 * The SS RPL is always equal to the CPL, while the CS RPL
2468 * isn't necessarily equal if the segment is conforming.
2469 * See section 4.11.1 in the AMD manual.
2470 */
2471 cpl = (pCtxCore->ss & X86_SEL_RPL);
2472#ifndef IN_RING0
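            /* This compensates for raw-mode ring compression: the guest's
               ring-0 code is executed in ring 1, so an SS RPL of 1 here means
               guest CPL 0.  Ring-0 (hardware-assisted) sees the real RPL. */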
2473 if (cpl == 1)
2474 cpl = 0;
2475#endif
2476 }
2477 else
2478 cpl = 3;
2479 }
2480 else
2481 cpl = 0; /* real mode; cpl is zero */
2482
2483 return cpl;
2484}
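/*
 * Illustrative sketch (hypothetical helper): a typical consumer of
 * CPUMGetGuestCPL, allowing an emulated privileged operation only at CPL 0.
 */
#if 0 /* illustration only */
static bool exampleMayExecutePrivileged(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    /* MOV CRx, LGDT, HLT and friends require CPL 0. */
    return CPUMGetGuestCPL(pVCpu, pCtxCore) == 0;
}
#endif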
2485
2486
2487/**
2488 * Gets the current guest CPU mode.
2489 *
2490 * If paging mode is what you need, check out PGMGetGuestMode().
2491 *
2492 * @returns The CPU mode.
2493 * @param pVCpu The VMCPU handle.
2494 */
2495VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2496{
2497 CPUMMODE enmMode;
2498 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2499 enmMode = CPUMMODE_REAL;
2500 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2501 enmMode = CPUMMODE_PROTECTED;
2502 else
2503 enmMode = CPUMMODE_LONG;
2504
2505 return enmMode;
2506}
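/*
 * Illustrative sketch (hypothetical helper): mapping the CPUMMODE value
 * returned above to a string, e.g. for release logging.
 */
#if 0 /* illustration only */
static const char *exampleModeName(PVMCPU pVCpu)
{
    switch (CPUMGetGuestMode(pVCpu))
    {
        case CPUMMODE_REAL:      return "real";
        case CPUMMODE_PROTECTED: return "protected";
        case CPUMMODE_LONG:      return "long";
        default:                 return "unknown";
    }
}
#endif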