VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 30684

Last change on this file since 30684 was 30263, checked in by vboxsync, 15 years ago

VMM,REM: Only invalidate hidden registers when using raw-mode. Fixes save restore during mode switching code like the windows boot menu. (#5057)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 55.0 KB
1/* $Id: CPUMAllRegs.cpp 30263 2010-06-16 18:31:42Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/cpum.h>
24#include <VBox/patm.h>
25#include <VBox/dbgf.h>
26#include <VBox/mm.h>
27#include "CPUMInternal.h"
28#include <VBox/vm.h>
29#include <VBox/err.h>
30#include <VBox/dis.h>
31#include <VBox/log.h>
32#include <VBox/hwaccm.h>
33#include <VBox/tm.h>
34#include <iprt/assert.h>
35#include <iprt/asm.h>
36#include <iprt/asm-amd64-x86.h>
37#ifdef IN_RING3
38#include <iprt/thread.h>
39#endif
40
41/** Disable stack frame pointer generation here. */
42#if defined(_MSC_VER) && !defined(DEBUG)
43# pragma optimize("y", off)
44#endif
45
46
47/**
48 * Sets or resets an alternative hypervisor context core.
49 *
50 * This is called when we get a hypervisor trap and need to switch the
51 * context core to the trap frame on the stack. It is called again to reset
52 * back to the default context core when resuming hypervisor execution.
53 *
54 * @param pVCpu The VMCPU handle.
55 * @param pCtxCore Pointer to the alternative context core or NULL
56 * to go back to the default context core.
57 */
58VMMDECL(void) CPUMHyperSetCtxCore(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
59{
60 PVM pVM = pVCpu->CTX_SUFF(pVM);
61
62 LogFlow(("CPUMHyperSetCtxCore: %p/%p/%p -> %p\n", pVCpu->cpum.s.CTX_SUFF(pHyperCore), pCtxCore));
63 if (!pCtxCore)
64 {
65 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
66 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
67 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
68 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))VM_RC_ADDR(pVM, pCtxCore);
69 }
70 else
71 {
72 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
73 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
74 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
75 }
76}
77
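/*
 * Illustrative sketch (not part of the original source): the set/reset pattern
 * described above, assuming a hypothetical trap handler with a CPUMCTXCORE in
 * its trap frame. The function name is made up for illustration only.
 */
#if 0 /* example only */
static void exampleHandleHyperTrap(PVMCPU pVCpu, PCPUMCTXCORE pTrapFrameCtxCore)
{
    /* Switch to the context core sitting in the trap frame... */
    CPUMHyperSetCtxCore(pVCpu, pTrapFrameCtxCore);

    /* ...inspect or adjust the hypervisor registers via the accessors below... */

    /* ...and restore the default context core before resuming execution. */
    CPUMHyperSetCtxCore(pVCpu, NULL);
}
#endif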
78
79/**
80 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
81 * This is only for reading in order to save a few calls.
82 *
83 * @param pVCpu Handle to the virtual CPU.
84 */
85VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
86{
87 return pVCpu->cpum.s.CTX_SUFF(pHyperCore);
88}
89
90
91/**
92 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
93 *
94 * @returns VBox status code.
95 * @param pVCpu Handle to the virtual CPU.
96 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
97 *
98 * @deprecated This does *not* (and never has) give the right picture of the
99 * hypervisor register state. With CPUMHyperSetCtxCore() this is
100 * getting much worse. So, use the individual functions for getting
101 * and esp. setting the hypervisor registers.
102 */
103VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx)
104{
105 *ppCtx = &pVCpu->cpum.s.Hyper;
106 return VINF_SUCCESS;
107}
108
109
110VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
111{
112 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
113 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
114 pVCpu->cpum.s.Hyper.gdtrPadding = 0;
115}
116
117
118VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
119{
120 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
121 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
122 pVCpu->cpum.s.Hyper.idtrPadding = 0;
123}
124
125
126VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
127{
128 pVCpu->cpum.s.Hyper.cr3 = cr3;
129
130#ifdef IN_RC
131 /* Update the current CR3. */
132 ASMSetCR3(cr3);
133#endif
134}
135
136VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
137{
138 return pVCpu->cpum.s.Hyper.cr3;
139}
140
141
142VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
143{
144 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs = SelCS;
145}
146
147
148VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
149{
150 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds = SelDS;
151}
152
153
154VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
155{
156 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es = SelES;
157}
158
159
160VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
161{
162 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs = SelFS;
163}
164
165
166VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
167{
168 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs = SelGS;
169}
170
171
172VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
173{
174 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss = SelSS;
175}
176
177
178VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
179{
180 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp = u32ESP;
181}
182
183
184VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
185{
186 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32 = Efl;
187 return VINF_SUCCESS;
188}
189
190
191VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
192{
193 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip = u32EIP;
194}
195
196
197VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
198{
199 pVCpu->cpum.s.Hyper.tr = SelTR;
200}
201
202
203VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
204{
205 pVCpu->cpum.s.Hyper.ldtr = SelLDTR;
206}
207
208
209VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
210{
211 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
212 /** @todo in GC we must load it! */
213}
214
215
216VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
217{
218 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
219 /** @todo in GC we must load it! */
220}
221
222
223VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
224{
225 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
226 /** @todo in GC we must load it! */
227}
228
229
230VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
231{
232 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
233 /** @todo in GC we must load it! */
234}
235
236
237VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
238{
239 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
240 /** @todo in GC we must load it! */
241}
242
243
244VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
245{
246 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
247 /** @todo in GC we must load it! */
248}
249
250
251VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
252{
253 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs;
254}
255
256
257VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
258{
259 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds;
260}
261
262
263VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
264{
265 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es;
266}
267
268
269VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
270{
271 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs;
272}
273
274
275VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
276{
277 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs;
278}
279
280
281VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
282{
283 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss;
284}
285
286
287VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
288{
289 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eax;
290}
291
292
293VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
294{
295 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebx;
296}
297
298
299VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
300{
301 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ecx;
302}
303
304
305VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
306{
307 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edx;
308}
309
310
311VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
312{
313 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esi;
314}
315
316
317VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
318{
319 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edi;
320}
321
322
323VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
324{
325 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebp;
326}
327
328
329VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
330{
331 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp;
332}
333
334
335VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
336{
337 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32;
338}
339
340
341VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
342{
343 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip;
344}
345
346
347VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
348{
349 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->rip;
350}
351
352
353VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
354{
355 if (pcbLimit)
356 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
357 return pVCpu->cpum.s.Hyper.idtr.pIdt;
358}
359
360
361VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
362{
363 if (pcbLimit)
364 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
365 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
366}
367
368
369VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
370{
371 return pVCpu->cpum.s.Hyper.ldtr;
372}
373
374
375VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
376{
377 return pVCpu->cpum.s.Hyper.dr[0];
378}
379
380
381VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
382{
383 return pVCpu->cpum.s.Hyper.dr[1];
384}
385
386
387VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
388{
389 return pVCpu->cpum.s.Hyper.dr[2];
390}
391
392
393VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
394{
395 return pVCpu->cpum.s.Hyper.dr[3];
396}
397
398
399VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
400{
401 return pVCpu->cpum.s.Hyper.dr[6];
402}
403
404
405VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
406{
407 return pVCpu->cpum.s.Hyper.dr[7];
408}
409
410
411/**
412 * Gets the pointer to the internal CPUMCTXCORE structure.
413 * This is only for reading in order to save a few calls.
414 *
415 * @param pVCpu Handle to the virtual cpu.
416 */
417VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
418{
419 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
420}
421
422
423/**
424 * Sets the guest context core registers.
425 *
426 * @param pVCpu Handle to the virtual cpu.
427 * @param pCtxCore The new context core values.
428 */
429VMMDECL(void) CPUMSetGuestCtxCore(PVMCPU pVCpu, PCCPUMCTXCORE pCtxCore)
430{
431 /** @todo #1410 requires selectors to be checked. (huh? 1410?) */
432
433 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
434 *pCtxCoreDst = *pCtxCore;
435
436 /* Mask away invalid parts of the cpu context. */
437 if (!CPUMIsGuestInLongMode(pVCpu))
438 {
439 uint64_t u64Mask = UINT64_C(0xffffffff);
440
441 pCtxCoreDst->rip &= u64Mask;
442 pCtxCoreDst->rax &= u64Mask;
443 pCtxCoreDst->rbx &= u64Mask;
444 pCtxCoreDst->rcx &= u64Mask;
445 pCtxCoreDst->rdx &= u64Mask;
446 pCtxCoreDst->rsi &= u64Mask;
447 pCtxCoreDst->rdi &= u64Mask;
448 pCtxCoreDst->rbp &= u64Mask;
449 pCtxCoreDst->rsp &= u64Mask;
450 pCtxCoreDst->rflags.u &= u64Mask;
451
452 pCtxCoreDst->r8 = 0;
453 pCtxCoreDst->r9 = 0;
454 pCtxCoreDst->r10 = 0;
455 pCtxCoreDst->r11 = 0;
456 pCtxCoreDst->r12 = 0;
457 pCtxCoreDst->r13 = 0;
458 pCtxCoreDst->r14 = 0;
459 pCtxCoreDst->r15 = 0;
460 }
461}
462
463
464/**
465 * Queries the pointer to the internal CPUMCTX structure
466 *
467 * @returns The CPUMCTX pointer.
468 * @param pVCpu Handle to the virtual cpu.
469 */
470VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
471{
472 return &pVCpu->cpum.s.Guest;
473}
474
475VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
476{
477 pVCpu->cpum.s.Guest.gdtr.cbGdt = limit;
478 pVCpu->cpum.s.Guest.gdtr.pGdt = addr;
479 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
480 return VINF_SUCCESS;
481}
482
483VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
484{
485 pVCpu->cpum.s.Guest.idtr.cbIdt = limit;
486 pVCpu->cpum.s.Guest.idtr.pIdt = addr;
487 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
488 return VINF_SUCCESS;
489}
490
491VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
492{
493 AssertMsgFailed(("Need to load the hidden bits too!\n"));
494
495 pVCpu->cpum.s.Guest.tr = tr;
496 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
497 return VINF_SUCCESS;
498}
499
500VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
501{
502 pVCpu->cpum.s.Guest.ldtr = ldtr;
503 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
504 return VINF_SUCCESS;
505}
506
507
508/**
509 * Set the guest CR0.
510 *
511 * When called in GC, the hyper CR0 may be updated if that is
512 * required. The caller only has to take special action if AM,
513 * WP, PG or PE changes.
514 *
515 * @returns VINF_SUCCESS (consider it void).
516 * @param pVCpu Handle to the virtual cpu.
517 * @param cr0 The new CR0 value.
518 */
519VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
520{
521#ifdef IN_RC
522 /*
523 * Check if we need to change hypervisor CR0 because
524 * of math stuff.
525 */
526 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
527 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
528 {
529 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
530 {
531 /*
532 * We haven't saved the host FPU state yet, so TS and MP are both set
533 * and EM should be reflecting the guest EM (it always does this).
534 */
535 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
536 {
537 uint32_t HyperCR0 = ASMGetCR0();
538 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
539 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
540 HyperCR0 &= ~X86_CR0_EM;
541 HyperCR0 |= cr0 & X86_CR0_EM;
542 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
543 ASMSetCR0(HyperCR0);
544 }
545# ifdef VBOX_STRICT
546 else
547 {
548 uint32_t HyperCR0 = ASMGetCR0();
549 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
550 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
551 }
552# endif
553 }
554 else
555 {
556 /*
557 * Already saved the state, so we're just mirroring
558 * the guest flags.
559 */
560 uint32_t HyperCR0 = ASMGetCR0();
561 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
562 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
563 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
564 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
565 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
566 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
567 ASMSetCR0(HyperCR0);
568 }
569 }
570#endif /* IN_RC */
571
572 /*
573 * Check for changes causing TLB flushes (for REM).
574 * The caller is responsible for calling PGM when appropriate.
575 */
576 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
577 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
578 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
579 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
580
581 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
582 return VINF_SUCCESS;
583}
584
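/*
 * Illustrative sketch (not part of the original source): the CR0 mirroring
 * performed above once the host FPU state has been saved; the hypervisor
 * CR0.TS, CR0.EM and CR0.MP bits simply track the guest values. The helper
 * name is made up for illustration only.
 */
#if 0 /* example only */
static uint32_t exampleMirrorGuestCr0FpuBits(uint32_t uHyperCr0, uint64_t uGuestCr0)
{
    uHyperCr0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);                        /* drop the old copies */
    uHyperCr0 |= (uint32_t)(uGuestCr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)); /* mirror the guest */
    return uHyperCr0;
}
#endif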
585
586VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
587{
588 pVCpu->cpum.s.Guest.cr2 = cr2;
589 return VINF_SUCCESS;
590}
591
592
593VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
594{
595 pVCpu->cpum.s.Guest.cr3 = cr3;
596 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
597 return VINF_SUCCESS;
598}
599
600
601VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
602{
603 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
604 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
605 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
606 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
607 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
608 cr4 &= ~X86_CR4_OSFSXR;
609 pVCpu->cpum.s.Guest.cr4 = cr4;
610 return VINF_SUCCESS;
611}
612
613
614VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
615{
616 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
617 return VINF_SUCCESS;
618}
619
620
621VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
622{
623 pVCpu->cpum.s.Guest.eip = eip;
624 return VINF_SUCCESS;
625}
626
627
628VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
629{
630 pVCpu->cpum.s.Guest.eax = eax;
631 return VINF_SUCCESS;
632}
633
634
635VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
636{
637 pVCpu->cpum.s.Guest.ebx = ebx;
638 return VINF_SUCCESS;
639}
640
641
642VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
643{
644 pVCpu->cpum.s.Guest.ecx = ecx;
645 return VINF_SUCCESS;
646}
647
648
649VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
650{
651 pVCpu->cpum.s.Guest.edx = edx;
652 return VINF_SUCCESS;
653}
654
655
656VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
657{
658 pVCpu->cpum.s.Guest.esp = esp;
659 return VINF_SUCCESS;
660}
661
662
663VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
664{
665 pVCpu->cpum.s.Guest.ebp = ebp;
666 return VINF_SUCCESS;
667}
668
669
670VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
671{
672 pVCpu->cpum.s.Guest.esi = esi;
673 return VINF_SUCCESS;
674}
675
676
677VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
678{
679 pVCpu->cpum.s.Guest.edi = edi;
680 return VINF_SUCCESS;
681}
682
683
684VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
685{
686 pVCpu->cpum.s.Guest.ss = ss;
687 return VINF_SUCCESS;
688}
689
690
691VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
692{
693 pVCpu->cpum.s.Guest.cs = cs;
694 return VINF_SUCCESS;
695}
696
697
698VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
699{
700 pVCpu->cpum.s.Guest.ds = ds;
701 return VINF_SUCCESS;
702}
703
704
705VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
706{
707 pVCpu->cpum.s.Guest.es = es;
708 return VINF_SUCCESS;
709}
710
711
712VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
713{
714 pVCpu->cpum.s.Guest.fs = fs;
715 return VINF_SUCCESS;
716}
717
718
719VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
720{
721 pVCpu->cpum.s.Guest.gs = gs;
722 return VINF_SUCCESS;
723}
724
725
726VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
727{
728 pVCpu->cpum.s.Guest.msrEFER = val;
729}
730
731
732VMMDECL(uint64_t) CPUMGetGuestMsr(PVMCPU pVCpu, unsigned idMsr)
733{
734 uint64_t u64 = 0;
735 uint8_t u8Multiplier = 4;
736
737 switch (idMsr)
738 {
739 case MSR_IA32_TSC:
740 u64 = TMCpuTickGet(pVCpu);
741 break;
742
743 case MSR_IA32_CR_PAT:
744 u64 = pVCpu->cpum.s.Guest.msrPAT;
745 break;
746
747 case MSR_IA32_SYSENTER_CS:
748 u64 = pVCpu->cpum.s.Guest.SysEnter.cs;
749 break;
750
751 case MSR_IA32_SYSENTER_EIP:
752 u64 = pVCpu->cpum.s.Guest.SysEnter.eip;
753 break;
754
755 case MSR_IA32_SYSENTER_ESP:
756 u64 = pVCpu->cpum.s.Guest.SysEnter.esp;
757 break;
758
759 case MSR_K6_EFER:
760 u64 = pVCpu->cpum.s.Guest.msrEFER;
761 break;
762
763 case MSR_K8_SF_MASK:
764 u64 = pVCpu->cpum.s.Guest.msrSFMASK;
765 break;
766
767 case MSR_K6_STAR:
768 u64 = pVCpu->cpum.s.Guest.msrSTAR;
769 break;
770
771 case MSR_K8_LSTAR:
772 u64 = pVCpu->cpum.s.Guest.msrLSTAR;
773 break;
774
775 case MSR_K8_CSTAR:
776 u64 = pVCpu->cpum.s.Guest.msrCSTAR;
777 break;
778
779 case MSR_K8_KERNEL_GS_BASE:
780 u64 = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
781 break;
782
783 case MSR_K8_TSC_AUX:
784 u64 = pVCpu->cpum.s.GuestMsr.msr.tscAux;
785 break;
786
787 case MSR_IA32_PERF_STATUS:
788 /** @todo This may not be exactly correct; maybe use the host's values instead. */
789 /* Keep consistent with helper_rdmsr() in REM */
790 u64 = (1000ULL /* TSC increment by tick */)
791 | ((uint64_t)u8Multiplier << 24 /* CPU multiplier (aka bus ratio) min */ )
792 | ((uint64_t)u8Multiplier << 40 /* CPU multiplier (aka bus ratio) max */ );
793 break;
794
795 case MSR_IA32_FSB_CLOCK_STS:
796 /*
797 * Encoded as:
798 * 0 - 266
799 * 1 - 133
800 * 2 - 200
801 * 3 - 166
802 * 5 - 100
803 */
804 u64 = (2 << 4);
805 break;
806
807 case MSR_IA32_PLATFORM_INFO:
808 u64 = ((u8Multiplier)<<8 /* Flex ratio max */)
809 | ((uint64_t)u8Multiplier << 40 /* Flex ratio min */ );
810 break;
811
812 case MSR_IA32_THERM_STATUS:
813 /* CPU temperature relative to TCC; to actually activate this, CPUID leaf 6 EAX[0] must be set. */
814 u64 = (1 << 31) /* validity bit */ |
815 (20 << 16) /* degrees till TCC */;
816 break;
817
818 case MSR_IA32_MISC_ENABLE:
819#if 0
820 /* Needs to be tested more before enabling. */
821 u64 = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
822#else
823 u64 = 0;
824#endif
825 break;
826
827 /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
828 default:
829 AssertFailed();
830 break;
831 }
832 return u64;
833}
834
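/*
 * Illustrative usage sketch (not part of the original source): reading one of
 * the virtualized MSRs handled above. The function name is made up for
 * illustration only.
 */
#if 0 /* example only */
static void exampleReadSysEnterEip(PVMCPU pVCpu)
{
    uint64_t uSysEnterEip = CPUMGetGuestMsr(pVCpu, MSR_IA32_SYSENTER_EIP);
    Log(("Guest SYSENTER_EIP=%RX64\n", uSysEnterEip));
}
#endif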
835VMMDECL(void) CPUMSetGuestMsr(PVMCPU pVCpu, unsigned idMsr, uint64_t valMsr)
836{
837 /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
838 switch (idMsr)
839 {
840 case MSR_K8_TSC_AUX:
841 pVCpu->cpum.s.GuestMsr.msr.tscAux = valMsr;
842 break;
843
844 case MSR_IA32_MISC_ENABLE:
845 pVCpu->cpum.s.GuestMsr.msr.miscEnable = valMsr;
846 break;
847
848 default:
849 AssertFailed();
850 break;
851 }
852}
853
854VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
855{
856 if (pcbLimit)
857 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
858 return pVCpu->cpum.s.Guest.idtr.pIdt;
859}
860
861
862VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
863{
864 if (pHidden)
865 *pHidden = pVCpu->cpum.s.Guest.trHid;
866 return pVCpu->cpum.s.Guest.tr;
867}
868
869
870VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
871{
872 return pVCpu->cpum.s.Guest.cs;
873}
874
875
876VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
877{
878 return pVCpu->cpum.s.Guest.ds;
879}
880
881
882VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
883{
884 return pVCpu->cpum.s.Guest.es;
885}
886
887
888VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
889{
890 return pVCpu->cpum.s.Guest.fs;
891}
892
893
894VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
895{
896 return pVCpu->cpum.s.Guest.gs;
897}
898
899
900VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
901{
902 return pVCpu->cpum.s.Guest.ss;
903}
904
905
906VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
907{
908 return pVCpu->cpum.s.Guest.ldtr;
909}
910
911
912VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
913{
914 return pVCpu->cpum.s.Guest.cr0;
915}
916
917
918VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
919{
920 return pVCpu->cpum.s.Guest.cr2;
921}
922
923
924VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
925{
926 return pVCpu->cpum.s.Guest.cr3;
927}
928
929
930VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
931{
932 return pVCpu->cpum.s.Guest.cr4;
933}
934
935
936VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
937{
938 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
939}
940
941
942VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
943{
944 return pVCpu->cpum.s.Guest.eip;
945}
946
947
948VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
949{
950 return pVCpu->cpum.s.Guest.rip;
951}
952
953
954VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
955{
956 return pVCpu->cpum.s.Guest.eax;
957}
958
959
960VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
961{
962 return pVCpu->cpum.s.Guest.ebx;
963}
964
965
966VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
967{
968 return pVCpu->cpum.s.Guest.ecx;
969}
970
971
972VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
973{
974 return pVCpu->cpum.s.Guest.edx;
975}
976
977
978VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
979{
980 return pVCpu->cpum.s.Guest.esi;
981}
982
983
984VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
985{
986 return pVCpu->cpum.s.Guest.edi;
987}
988
989
990VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
991{
992 return pVCpu->cpum.s.Guest.esp;
993}
994
995
996VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
997{
998 return pVCpu->cpum.s.Guest.ebp;
999}
1000
1001
1002VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1003{
1004 return pVCpu->cpum.s.Guest.eflags.u32;
1005}
1006
1007
1008///@todo: crx should be an array
1009VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1010{
1011 switch (iReg)
1012 {
1013 case USE_REG_CR0:
1014 *pValue = pVCpu->cpum.s.Guest.cr0;
1015 break;
1016 case USE_REG_CR2:
1017 *pValue = pVCpu->cpum.s.Guest.cr2;
1018 break;
1019 case USE_REG_CR3:
1020 *pValue = pVCpu->cpum.s.Guest.cr3;
1021 break;
1022 case USE_REG_CR4:
1023 *pValue = pVCpu->cpum.s.Guest.cr4;
1024 break;
1025 default:
1026 return VERR_INVALID_PARAMETER;
1027 }
1028 return VINF_SUCCESS;
1029}
1030
1031
1032VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1033{
1034 return pVCpu->cpum.s.Guest.dr[0];
1035}
1036
1037
1038VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1039{
1040 return pVCpu->cpum.s.Guest.dr[1];
1041}
1042
1043
1044VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1045{
1046 return pVCpu->cpum.s.Guest.dr[2];
1047}
1048
1049
1050VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1051{
1052 return pVCpu->cpum.s.Guest.dr[3];
1053}
1054
1055
1056VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1057{
1058 return pVCpu->cpum.s.Guest.dr[6];
1059}
1060
1061
1062VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1063{
1064 return pVCpu->cpum.s.Guest.dr[7];
1065}
1066
1067
1068VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1069{
1070 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1071 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1072 if (iReg == 4 || iReg == 5)
1073 iReg += 2;
1074 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1075 return VINF_SUCCESS;
1076}
1077
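/*
 * Illustrative sketch (not part of the original source): the DR4/DR5 aliasing
 * handled above; asking for DR5 yields the DR7 value. The function name is
 * made up for illustration only.
 */
#if 0 /* example only */
static void exampleReadAliasedDr5(PVMCPU pVCpu)
{
    uint64_t uValue = 0;
    int rc = CPUMGetGuestDRx(pVCpu, 5 /* alias of DR7 */, &uValue);
    AssertRC(rc);
    Assert(uValue == CPUMGetGuestDR7(pVCpu));
}
#endif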
1078
1079VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1080{
1081 return pVCpu->cpum.s.Guest.msrEFER;
1082}
1083
1084
1085/**
1086 * Gets a CpuId leaf.
1087 *
1088 * @param pVCpu The VMCPU handle.
1089 * @param iLeaf The CPUID leaf to get.
1090 * @param pEax Where to store the EAX value.
1091 * @param pEbx Where to store the EBX value.
1092 * @param pEcx Where to store the ECX value.
1093 * @param pEdx Where to store the EDX value.
1094 */
1095VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1096{
1097 PVM pVM = pVCpu->CTX_SUFF(pVM);
1098
1099 PCCPUMCPUID pCpuId;
1100 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1101 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1102 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1103 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1104 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1105 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1106 else
1107 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1108
1109 uint32_t cCurrentCacheIndex = *pEcx;
1110
1111 *pEax = pCpuId->eax;
1112 *pEbx = pCpuId->ebx;
1113 *pEcx = pCpuId->ecx;
1114 *pEdx = pCpuId->edx;
1115
1116 if ( iLeaf == 1)
1117 {
1118 /* Bits 31-24: Initial APIC ID */
1119 Assert(pVCpu->idCpu <= 255);
1120 *pEbx |= (pVCpu->idCpu << 24);
1121 }
1122
1123 if ( iLeaf == 4
1124 && cCurrentCacheIndex < 3
1125 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1126 {
1127 uint32_t type, level, sharing, linesize,
1128 partitions, associativity, sets, cores;
1129
1130 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1131 partitions = 1;
1132 /* These are only to quiet the compiler, as they always get
1133 overwritten, and the compiler should be able to figure that out. */
1134 sets = associativity = sharing = level = 1;
1135 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1136 switch (cCurrentCacheIndex)
1137 {
1138 case 0:
1139 type = 1;
1140 level = 1;
1141 sharing = 1;
1142 linesize = 64;
1143 associativity = 8;
1144 sets = 64;
1145 break;
1146 case 1:
1147 level = 1;
1148 type = 2;
1149 sharing = 1;
1150 linesize = 64;
1151 associativity = 8;
1152 sets = 64;
1153 break;
1154 default: /* shut up gcc.*/
1155 AssertFailed();
1156 case 2:
1157 level = 2;
1158 type = 3;
1159 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1160 linesize = 64;
1161 associativity = 24;
1162 sets = 4096;
1163 break;
1164 }
1165
1166 *pEax |= ((cores - 1) << 26) |
1167 ((sharing - 1) << 14) |
1168 (level << 5) |
1169 1;
1170 *pEbx = (linesize - 1) |
1171 ((partitions - 1) << 12) |
1172 ((associativity - 1) << 22); /* -1 encoding */
1173 *pEcx = sets - 1;
1174 }
1175
1176 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1177}
1178
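/*
 * Illustrative usage sketch (not part of the original source): querying
 * standard leaf 1 and extracting the initial APIC ID patched into EBX[31:24]
 * above. ECX is also read as input (the cache index for leaf 4), so it is
 * initialized before the call. The function name is made up for illustration.
 */
#if 0 /* example only */
static uint8_t exampleGetInitialApicId(PVMCPU pVCpu)
{
    uint32_t uEax, uEbx, uEcx = 0, uEdx;
    CPUMGetGuestCpuId(pVCpu, 1, &uEax, &uEbx, &uEcx, &uEdx);
    return (uint8_t)(uEbx >> 24);
}
#endif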
1179/**
1180 * Gets a number of standard CPUID leafs.
1181 *
1182 * @returns Number of leafs.
1183 * @param pVM The VM handle.
1184 * @remark Intended for PATM.
1185 */
1186VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1187{
1188 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1189}
1190
1191
1192/**
1193 * Gets a number of extended CPUID leafs.
1194 *
1195 * @returns Number of leafs.
1196 * @param pVM The VM handle.
1197 * @remark Intended for PATM.
1198 */
1199VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1200{
1201 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1202}
1203
1204
1205/**
1206 * Gets a number of centaur CPUID leafs.
1207 *
1208 * @returns Number of leafs.
1209 * @param pVM The VM handle.
1210 * @remark Intended for PATM.
1211 */
1212VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1213{
1214 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1215}
1216
1217
1218/**
1219 * Sets a CPUID feature bit.
1220 *
1221 * @param pVM The VM Handle.
1222 * @param enmFeature The feature to set.
1223 */
1224VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1225{
1226 switch (enmFeature)
1227 {
1228 /*
1229 * Set the APIC bit in both feature masks.
1230 */
1231 case CPUMCPUIDFEATURE_APIC:
1232 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1233 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1234 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1235 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1236 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1237 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1238 break;
1239
1240 /*
1241 * Set the x2APIC bit in the standard feature mask.
1242 */
1243 case CPUMCPUIDFEATURE_X2APIC:
1244 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1245 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1246 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1247 break;
1248
1249 /*
1250 * Set the sysenter/sysexit bit in the standard feature mask.
1251 * Assumes the caller knows what it's doing! (host must support these)
1252 */
1253 case CPUMCPUIDFEATURE_SEP:
1254 {
1255 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1256 {
1257 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1258 return;
1259 }
1260
1261 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1262 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1263 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1264 break;
1265 }
1266
1267 /*
1268 * Set the syscall/sysret bit in the extended feature mask.
1269 * Assumes the caller knows what it's doing! (host must support these)
1270 */
1271 case CPUMCPUIDFEATURE_SYSCALL:
1272 {
1273 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1274 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1275 {
1276#if HC_ARCH_BITS == 32
1277 /* X86_CPUID_AMD_FEATURE_EDX_SEP does not seem to be set in 32-bit mode,
1278 * even when the CPU is capable of it in 64-bit mode.
1279 */
1280 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1281 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1282 || !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1283#endif
1284 {
1285 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1286 return;
1287 }
1288 }
1289 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1290 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1291 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1292 break;
1293 }
1294
1295 /*
1296 * Set the PAE bit in both feature masks.
1297 * Assumes the caller knows what it's doing! (host must support these)
1298 */
1299 case CPUMCPUIDFEATURE_PAE:
1300 {
1301 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1302 {
1303 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1304 return;
1305 }
1306
1307 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1308 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1309 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1310 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1311 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1312 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1313 break;
1314 }
1315
1316 /*
1317 * Set the LONG MODE bit in the extended feature mask.
1318 * Assumes the caller knows what it's doing! (host must support these)
1319 */
1320 case CPUMCPUIDFEATURE_LONG_MODE:
1321 {
1322 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1323 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1324 {
1325 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1326 return;
1327 }
1328
1329 /* Valid for both Intel and AMD. */
1330 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1331 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1332 break;
1333 }
1334
1335 /*
1336 * Set the NXE bit in the extended feature mask.
1337 * Assumes the caller knows what it's doing! (host must support these)
1338 */
1339 case CPUMCPUIDFEATURE_NXE:
1340 {
1341 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1342 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1343 {
1344 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1345 return;
1346 }
1347
1348 /* Valid for both Intel and AMD. */
1349 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1350 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1351 break;
1352 }
1353
1354 case CPUMCPUIDFEATURE_LAHF:
1355 {
1356 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1357 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1358 {
1359 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1360 return;
1361 }
1362
1363 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1364 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1365 break;
1366 }
1367
1368 case CPUMCPUIDFEATURE_PAT:
1369 {
1370 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1371 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1372 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1373 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1374 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1375 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAT\n"));
1376 break;
1377 }
1378
1379 case CPUMCPUIDFEATURE_RDTSCP:
1380 {
1381 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1382 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP)
1383 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1384 {
1385 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1386 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1387 return;
1388 }
1389
1390 /* Valid for AMD only (for now). */
1391 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
1392 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1393 break;
1394 }
1395
1396 default:
1397 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1398 break;
1399 }
1400 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1401 {
1402 PVMCPU pVCpu = &pVM->aCpus[i];
1403 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1404 }
1405}
1406
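/*
 * Illustrative usage sketch (not part of the original source): exposing PAE to
 * the guest, assuming the host supports it (otherwise the call above logs a
 * warning and returns without changing anything). The function name is made up
 * for illustration only.
 */
#if 0 /* example only */
static void exampleExposePae(PVM pVM)
{
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    Assert(CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)); /* holds when the host has PAE */
}
#endif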
1407
1408/**
1409 * Queries a CPUID feature bit.
1410 *
1411 * @returns boolean for feature presence
1412 * @param pVM The VM Handle.
1413 * @param enmFeature The feature to query.
1414 */
1415VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1416{
1417 switch (enmFeature)
1418 {
1419 case CPUMCPUIDFEATURE_PAE:
1420 {
1421 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1422 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1423 break;
1424 }
1425
1426 case CPUMCPUIDFEATURE_NXE:
1427 {
1428 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1429 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_NX);
 break; /* don't fall through to the RDTSCP case */
1430 }
1431
1432 case CPUMCPUIDFEATURE_RDTSCP:
1433 {
1434 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1435 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
1436 break;
1437 }
1438
1439 case CPUMCPUIDFEATURE_LONG_MODE:
1440 {
1441 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1442 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
1443 break;
1444 }
1445
1446 default:
1447 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1448 break;
1449 }
1450 return false;
1451}
1452
1453
1454/**
1455 * Clears a CPUID feature bit.
1456 *
1457 * @param pVM The VM Handle.
1458 * @param enmFeature The feature to clear.
1459 */
1460VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1461{
1462 switch (enmFeature)
1463 {
1464 /*
1465 * Set the APIC bit in both feature masks.
1466 */
1467 case CPUMCPUIDFEATURE_APIC:
1468 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1469 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1470 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1471 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1472 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1473 Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n"));
1474 break;
1475
1476 /*
1477 * Clear the x2APIC bit in the standard feature mask.
1478 */
1479 case CPUMCPUIDFEATURE_X2APIC:
1480 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1481 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1482 LogRel(("CPUMClearGuestCpuIdFeature: Disabled x2APIC\n"));
1483 break;
1484
1485 case CPUMCPUIDFEATURE_PAE:
1486 {
1487 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1488 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1489 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1490 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1491 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1492 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1493 break;
1494 }
1495
1496 case CPUMCPUIDFEATURE_PAT:
1497 {
1498 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1499 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1500 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1501 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1502 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1503 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1504 break;
1505 }
1506
1507 case CPUMCPUIDFEATURE_LONG_MODE:
1508 {
1509 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1510 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1511 break;
1512 }
1513
1514 case CPUMCPUIDFEATURE_LAHF:
1515 {
1516 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1517 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1518 break;
1519 }
1520
1521 default:
1522 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1523 break;
1524 }
1525 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1526 {
1527 PVMCPU pVCpu = &pVM->aCpus[i];
1528 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1529 }
1530}
1531
1532
1533/**
1534 * Gets the host CPU vendor
1535 *
1536 * @returns CPU vendor
1537 * @param pVM The VM handle.
1538 */
1539VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1540{
1541 return pVM->cpum.s.enmHostCpuVendor;
1542}
1543
1544/**
1545 * Gets the guest CPU vendor
1546 *
1547 * @returns CPU vendor
1548 * @param pVM The VM handle.
1549 */
1550VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1551{
1552 return pVM->cpum.s.enmGuestCpuVendor;
1553}
1554
1555
1556VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1557{
1558 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1559 return CPUMRecalcHyperDRx(pVCpu);
1560}
1561
1562
1563VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1564{
1565 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1566 return CPUMRecalcHyperDRx(pVCpu);
1567}
1568
1569
1570VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1571{
1572 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1573 return CPUMRecalcHyperDRx(pVCpu);
1574}
1575
1576
1577VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1578{
1579 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1580 return CPUMRecalcHyperDRx(pVCpu);
1581}
1582
1583
1584VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1585{
1586 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1587 return CPUMRecalcHyperDRx(pVCpu);
1588}
1589
1590
1591VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1592{
1593 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1594 return CPUMRecalcHyperDRx(pVCpu);
1595}
1596
1597
1598VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1599{
1600 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1601 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1602 if (iReg == 4 || iReg == 5)
1603 iReg += 2;
1604 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1605 return CPUMRecalcHyperDRx(pVCpu);
1606}
1607
1608
1609/**
1610 * Recalculates the hypervisor DRx register values based on the
1611 * current guest registers and DBGF breakpoints.
1612 *
1613 * This is called whenever a guest DRx register is modified and when DBGF
1614 * sets a hardware breakpoint. In guest context this function will reload
1615 * any (hyper) DRx registers that come out with a different value.
1616 *
1617 * @returns VINF_SUCCESS.
1618 * @param pVCpu The VMCPU handle.
1619 */
1620VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
1621{
1622 PVM pVM = pVCpu->CTX_SUFF(pVM);
1623
1624 /*
1625 * Compare the DR7s first.
1626 *
1627 * We only care about the enabled flags. The GE and LE flags are always
1628 * set and we don't care if the guest doesn't set them. GD is virtualized
1629 * when we dispatch #DB, we never enable it.
1630 */
1631 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1632#ifdef CPUM_VIRTUALIZE_DRX
1633 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1634#else
1635 const RTGCUINTREG uGstDr7 = 0;
1636#endif
1637 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1638 {
1639 /*
1640 * Ok, something is enabled. Recalc each of the breakpoints.
1641 * Straightforward code, not optimized or minimized in any way.
1642 */
1643 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1644
1645 /* bp 0 */
1646 RTGCUINTREG uNewDr0;
1647 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1648 {
1649 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1650 uNewDr0 = DBGFBpGetDR0(pVM);
1651 }
1652 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1653 {
1654 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1655 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1656 }
1657 else
1658 uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
1659
1660 /* bp 1 */
1661 RTGCUINTREG uNewDr1;
1662 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1663 {
1664 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1665 uNewDr1 = DBGFBpGetDR1(pVM);
1666 }
1667 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1668 {
1669 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1670 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1671 }
1672 else
1673 uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
1674
1675 /* bp 2 */
1676 RTGCUINTREG uNewDr2;
1677 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1678 {
1679 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1680 uNewDr2 = DBGFBpGetDR2(pVM);
1681 }
1682 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1683 {
1684 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1685 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1686 }
1687 else
1688 uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
1689
1690 /* bp 3 */
1691 RTGCUINTREG uNewDr3;
1692 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1693 {
1694 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1695 uNewDr3 = DBGFBpGetDR3(pVM);
1696 }
1697 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1698 {
1699 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1700 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1701 }
1702 else
1703 uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
1704
1705 /*
1706 * Apply the updates.
1707 */
1708#ifdef IN_RC
1709 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
1710 {
1711 /** @todo save host DBx registers. */
1712 }
1713#endif
1714 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
1715 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1716 CPUMSetHyperDR3(pVCpu, uNewDr3);
1717 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1718 CPUMSetHyperDR2(pVCpu, uNewDr2);
1719 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1720 CPUMSetHyperDR1(pVCpu, uNewDr1);
1721 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1722 CPUMSetHyperDR0(pVCpu, uNewDr0);
1723 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1724 CPUMSetHyperDR7(pVCpu, uNewDr7);
1725 }
1726 else
1727 {
1728#ifdef IN_RC
1729 if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
1730 {
1731 /** @todo restore host DBx registers. */
1732 }
1733#endif
1734 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1735 }
1736 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1737 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1738 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1739 pVCpu->cpum.s.Hyper.dr[7]));
1740
1741 return VINF_SUCCESS;
1742}
1743
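/*
 * Illustrative sketch (not part of the original source): the per-breakpoint
 * merge performed above for breakpoint 0. A DBGF hardware breakpoint takes
 * priority over the guest's own DR0, which in turn is only used when its
 * enable bits are set in the guest DR7. The helper name is made up for
 * illustration only.
 */
#if 0 /* example only */
static RTGCUINTREG exampleSelectDr0(RTGCUINTREG uDbgfDr7, RTGCUINTREG uGstDr7,
                                    RTGCUINTREG uDbgfDr0, RTGCUINTREG uGstDr0,
                                    RTGCUINTREG uHyperDr0)
{
    if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
        return uDbgfDr0;    /* debugger breakpoint wins */
    if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
        return uGstDr0;     /* otherwise honour the guest breakpoint */
    return uHyperDr0;       /* nothing enabled: keep the current hyper value */
}
#endif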
1744
1745/**
1746 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1747 *
1748 * @returns true if NXE is enabled, otherwise false.
1749 * @param pVCpu The virtual CPU handle.
1750 */
1751VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
1752{
1753 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1754}
1755
1756
1757/**
1758 * Tests if the guest has the Page Size Extension enabled (PSE).
1759 *
1760 * @returns true if PSE or PAE is enabled (both imply large-page support), otherwise false.
1761 * @param pVCpu The virtual CPU handle.
1762 */
1763VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
1764{
1765 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1766 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1767}
1768
1769
1770/**
1771 * Tests if the guest has paging enabled (PG).
1772 *
1773 * @returns true if paging is enabled, otherwise false.
1774 * @param pVCpu The virtual CPU handle.
1775 */
1776VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
1777{
1778 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1779}
1780
1781
1782/**
1783 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
1784 *
1785 * @returns true if WP is enabled, otherwise false.
1786 * @param pVCpu The virtual CPU handle.
1787 */
1788VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
1789{
1790 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1791}
1792
1793
1794/**
1795 * Tests if the guest is running in real mode or not.
1796 *
1797 * @returns true if in real mode, otherwise false.
1798 * @param pVCpu The virtual CPU handle.
1799 */
1800VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
1801{
1802 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1803}
1804
1805
1806/**
1807 * Tests if the guest is running in protected mode or not.
1808 *
1809 * @returns true if in protected mode, otherwise false.
1810 * @param pVCpu The virtual CPU handle.
1811 */
1812VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
1813{
1814 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1815}
1816
1817
1818/**
1819 * Tests if the guest is running in paged protected mode or not.
1820 *
1821 * @returns true if in paged protected mode, otherwise false.
1822 * @param pVCpu The virtual CPU handle.
1823 */
1824VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
1825{
1826 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1827}
1828
1829
1830/**
1831 * Tests if the guest is running in long mode or not.
1832 *
1833 * @returns true if in long mode, otherwise false.
1834 * @param pVCpu The virtual CPU handle.
1835 */
1836VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
1837{
1838 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1839}
1840
1841
1842/**
1843 * Tests if the guest is running in PAE mode or not.
1844 *
1845 * @returns true if in PAE mode, otherwise false.
1846 * @param pVCpu The virtual CPU handle.
1847 */
1848VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
1849{
1850 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1851 && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
1852 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1853}
1854
1855
1856#ifndef IN_RING0
1857/**
1858 * Updates the EFLAGS while we're in raw-mode.
1859 *
1860 * @param pVCpu The VMCPU handle.
1861 * @param pCtxCore The context core.
1862 * @param eflags The new EFLAGS value.
1863 */
1864VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t eflags)
1865{
1866 PVM pVM = pVCpu->CTX_SUFF(pVM);
1867
1868 if (!pVCpu->cpum.s.fRawEntered)
1869 {
1870 pCtxCore->eflags.u32 = eflags;
1871 return;
1872 }
1873 PATMRawSetEFlags(pVM, pCtxCore, eflags);
1874}
1875#endif /* !IN_RING0 */
1876
1877
1878/**
1879 * Gets the EFLAGS while we're in raw-mode.
1880 *
1881 * @returns The eflags.
1882 * @param pVCpu The VMCPU handle.
1883 * @param pCtxCore The context core.
1884 */
1885VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
1886{
1887#ifdef IN_RING0
1888 return pCtxCore->eflags.u32;
1889#else
1890 PVM pVM = pVCpu->CTX_SUFF(pVM);
1891
1892 if (!pVCpu->cpum.s.fRawEntered)
1893 return pCtxCore->eflags.u32;
1894 return PATMRawGetEFlags(pVM, pCtxCore);
1895#endif
1896}
1897
1898
1899/**
1900 * Sets the specified changed flags (CPUM_CHANGED_*).
1901 *
1902 * @param pVCpu The VMCPU handle.
1903 */
1904VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
1905{
1906 pVCpu->cpum.s.fChanged |= fChangedFlags;
1907}
1908
1909
1910/**
1911 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
1912 * @returns true if supported.
1913 * @returns false if not supported.
1914 * @param pVM The VM handle.
1915 */
1916VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
1917{
1918 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
1919}
1920
1921
1922/**
1923 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1924 * @returns true if used.
1925 * @returns false if not used.
1926 * @param pVM The VM handle.
1927 */
1928VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1929{
1930 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
1931}
1932
1933
1934/**
1935 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1936 * @returns true if used.
1937 * @returns false if not used.
1938 * @param pVM The VM handle.
1939 */
1940VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1941{
1942 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
1943}
1944
1945#ifndef IN_RING3
1946
1947/**
1948 * Lazily sync in the FPU/XMM state
1949 *
1950 * @returns VBox status code.
1951 * @param pVCpu VMCPU handle
1952 */
1953VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
1954{
1955 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
1956}
1957
1958#endif /* !IN_RING3 */
1959
1960/**
1961 * Checks if we activated the FPU/XMM state of the guest OS
1962 * @returns true if we did.
1963 * @returns false if not.
1964 * @param pVCpu The VMCPU handle.
1965 */
1966VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
1967{
1968 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
1969}
1970
1971
1972/**
1973 * Deactivate the FPU/XMM state of the guest OS
1974 * @param pVCpu The VMCPU handle.
1975 */
1976VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
1977{
1978 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
1979}
1980
1981
1982/**
1983 * Checks if the guest debug state is active
1984 *
1985 * @returns boolean
1986 * @param pVCpu The VMCPU handle.
1987 */
1988VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
1989{
1990 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
1991}
1992
1993/**
1994 * Checks if the hyper debug state is active
1995 *
1996 * @returns boolean
1997 * @param pVCpu The VMCPU handle.
1998 */
1999VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2000{
2001 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
2002}
2003
2004
2005/**
2006 * Mark the guest's debug state as inactive.
2007 *
2008 * @param pVCpu The VMCPU handle.
2010 */
2011VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2012{
2013 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2014}
2015
2016
2017/**
2018 * Mark the hypervisor's debug state as inactive.
2019 *
2020 * @param pVCpu The VMCPU handle.
2022 */
2023VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
2024{
2025 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2026}
2027
2028/**
2029 * Checks if the hidden selector registers are valid for the specified CPU.
2030 *
2031 * @returns true if they are.
2032 * @returns false if not.
2033 * @param pVCpu The VMCPU handle.
2034 */
2035VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVMCPU pVCpu)
2036{
2037 bool const fRc = !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID);
2038 Assert(fRc || !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)));
2039 Assert(!pVCpu->cpum.s.fRemEntered);
2040 return fRc;
2041}
2042
2043
2044
2045/**
2046 * Get the current privilege level of the guest.
2047 *
2048 * @returns The CPL.
2049 * @param pVCpu The VMCPU handle.
2050 * @param pCtxCore The context core (trap register frame).
2051 */
2052VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2053{
2054 uint32_t cpl;
2055
2056 if (CPUMAreHiddenSelRegsValid(pVCpu))
2057 {
2058 /*
2059 * The hidden CS.DPL register is always equal to the CPL; it is
2060 * not affected by loading a conforming code segment.
2061 *
2062 * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
2063 * at SS. (ACP2 regression during install after a far call to ring 2)
2064 */
2065 if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2066 cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
2067 else
2068 cpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2069 }
2070 else if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2071 {
2072 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
2073 {
2074 /*
2075 * The SS RPL is always equal to the CPL, while the CS RPL
2076 * isn't necessarily equal if the segment is conforming.
2077 * See section 4.11.1 in the AMD manual.
2078 */
2079 cpl = (pCtxCore->ss & X86_SEL_RPL);
2080#ifndef IN_RING0
2081 if (cpl == 1)
2082 cpl = 0;
2083#endif
2084 }
2085 else
2086 cpl = 3;
2087 }
2088 else
2089 cpl = 0; /* real mode; cpl is zero */
2090
2091 return cpl;
2092}
2093
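/*
 * Illustrative sketch (not part of the original source): the selector-RPL path
 * above. With hidden registers invalid, no V86 mode and CR0.PE set, a guest SS
 * of 0x001b (RPL 3) yields CPL 3; the raw-mode ring compression check then maps
 * an RPL of 1 back to CPL 0 outside ring-0. The function name is made up for
 * illustration only.
 */
#if 0 /* example only */
static void exampleCplFromSs(void)
{
    uint16_t const uSs = 0x001b;            /* typical user-mode stack selector */
    uint32_t const cpl = uSs & X86_SEL_RPL;
    Assert(cpl == 3);
}
#endif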
2094
2095/**
2096 * Gets the current guest CPU mode.
2097 *
2098 * If paging mode is what you need, check out PGMGetGuestMode().
2099 *
2100 * @returns The CPU mode.
2101 * @param pVCpu The VMCPU handle.
2102 */
2103VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2104{
2105 CPUMMODE enmMode;
2106 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2107 enmMode = CPUMMODE_REAL;
2108 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2109 enmMode = CPUMMODE_PROTECTED;
2110 else
2111 enmMode = CPUMMODE_LONG;
2112
2113 return enmMode;
2114}
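/*
 * Illustrative usage sketch (not part of the original source): dispatching on
 * the CPU mode reported above; paging depth is PGM's business (see
 * PGMGetGuestMode). The function name is made up for illustration only.
 */
#if 0 /* example only */
static const char *exampleGuestModeName(PVMCPU pVCpu)
{
    switch (CPUMGetGuestMode(pVCpu))
    {
        case CPUMMODE_REAL:      return "real";
        case CPUMMODE_PROTECTED: return "protected";
        case CPUMMODE_LONG:      return "long";
        default:                 return "unknown";
    }
}
#endif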