
source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 29450

Last change on this file since 29450 was 29250, checked in by vboxsync on 2010-05-09

iprt/asm*.h: split out asm-math.h, don't include asm-*.h from asm.h, don't include asm.h from sup.h. Fixed a couple file headers.

1/* $Id: CPUMAllRegs.cpp 29250 2010-05-09 17:53:58Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/cpum.h>
24#include <VBox/patm.h>
25#include <VBox/dbgf.h>
26#include <VBox/mm.h>
27#include "CPUMInternal.h"
28#include <VBox/vm.h>
29#include <VBox/err.h>
30#include <VBox/dis.h>
31#include <VBox/log.h>
32#include <VBox/hwaccm.h>
33#include <VBox/tm.h>
34#include <iprt/assert.h>
35#include <iprt/asm.h>
36#include <iprt/asm-amd64-x86.h>
37#ifdef IN_RING3
38#include <iprt/thread.h>
39#endif
40
41/** Disable stack frame pointer generation here. */
42#if defined(_MSC_VER) && !defined(DEBUG)
43# pragma optimize("y", off)
44#endif
45
46
47/**
48 * Sets or resets an alternative hypervisor context core.
49 *
50 * This is called when we get a hypervisor trap to switch the context
51 * core with the trap frame on the stack. It is called again to reset
52 * back to the default context core when resuming hypervisor execution.
53 *
54 * @param pVCpu The VMCPU handle.
55 * @param pCtxCore Pointer to the alternative context core or NULL
56 * to go back to the default context core.
57 */
58VMMDECL(void) CPUMHyperSetCtxCore(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
59{
60 PVM pVM = pVCpu->CTX_SUFF(pVM);
61
62 LogFlow(("CPUMHyperSetCtxCore: %p -> %p\n", pVCpu->cpum.s.CTX_SUFF(pHyperCore), pCtxCore));
63 if (!pCtxCore)
64 {
65 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
66 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
67 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
68 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))VM_RC_ADDR(pVM, pCtxCore);
69 }
70 else
71 {
72 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
73 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
74 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
75 }
76}
77
78
79/**
80 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
81 * This is only for reading in order to save a few calls.
82 *
83 * @param pVCpu Handle to the virtual cpu.
84 */
85VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
86{
87 return pVCpu->cpum.s.CTX_SUFF(pHyperCore);
88}
89
90
91/**
92 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
93 *
94 * @returns VBox status code.
95 * @param pVCpu Handle to the virtual cpu.
96 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
97 *
98 * @deprecated This does *not* (and never has) give the right picture of the
99 * hypervisor register state. With CPUMHyperSetCtxCore() this is
100 * getting much worse. So, use the individual functions for getting
101 * and especially setting the hypervisor registers.
102 */
103VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx)
104{
105 *ppCtx = &pVCpu->cpum.s.Hyper;
106 return VINF_SUCCESS;
107}
108
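/*
 * Illustrative usage sketch (not part of the original file): as the
 * @deprecated note above suggests, read hypervisor registers through the
 * individual CPUMGetHyper*/CPUMSetHyper* functions defined below rather than
 * through CPUMQueryHyperCtxPtr().  The pVCpu handle is assumed to be
 * supplied by the caller.
 *
 *     uint32_t uHyperEip = CPUMGetHyperEIP(pVCpu);
 *     uint32_t uHyperEsp = CPUMGetHyperESP(pVCpu);
 *     RTSEL    SelCS     = CPUMGetHyperCS(pVCpu);
 *     Log(("hyper cs:eip=%04x:%08x esp=%08x\n", SelCS, uHyperEip, uHyperEsp));
 */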
109
110VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
111{
112 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
113 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
114 pVCpu->cpum.s.Hyper.gdtrPadding = 0;
115}
116
117
118VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
119{
120 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
121 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
122 pVCpu->cpum.s.Hyper.idtrPadding = 0;
123}
124
125
126VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
127{
128 pVCpu->cpum.s.Hyper.cr3 = cr3;
129
130#ifdef IN_RC
131 /* Update the current CR3. */
132 ASMSetCR3(cr3);
133#endif
134}
135
136VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
137{
138 return pVCpu->cpum.s.Hyper.cr3;
139}
140
141
142VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
143{
144 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs = SelCS;
145}
146
147
148VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
149{
150 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds = SelDS;
151}
152
153
154VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
155{
156 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es = SelES;
157}
158
159
160VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
161{
162 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs = SelFS;
163}
164
165
166VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
167{
168 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs = SelGS;
169}
170
171
172VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
173{
174 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss = SelSS;
175}
176
177
178VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
179{
180 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp = u32ESP;
181}
182
183
184VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
185{
186 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32 = Efl;
187 return VINF_SUCCESS;
188}
189
190
191VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
192{
193 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip = u32EIP;
194}
195
196
197VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
198{
199 pVCpu->cpum.s.Hyper.tr = SelTR;
200}
201
202
203VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
204{
205 pVCpu->cpum.s.Hyper.ldtr = SelLDTR;
206}
207
208
209VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
210{
211 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
212 /** @todo in GC we must load it! */
213}
214
215
216VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
217{
218 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
219 /** @todo in GC we must load it! */
220}
221
222
223VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
224{
225 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
226 /** @todo in GC we must load it! */
227}
228
229
230VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
231{
232 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
233 /** @todo in GC we must load it! */
234}
235
236
237VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
238{
239 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
240 /** @todo in GC we must load it! */
241}
242
243
244VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
245{
246 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
247 /** @todo in GC we must load it! */
248}
249
250
251VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
252{
253 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs;
254}
255
256
257VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
258{
259 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds;
260}
261
262
263VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
264{
265 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es;
266}
267
268
269VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
270{
271 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs;
272}
273
274
275VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
276{
277 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs;
278}
279
280
281VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
282{
283 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss;
284}
285
286
287VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
288{
289 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eax;
290}
291
292
293VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
294{
295 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebx;
296}
297
298
299VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
300{
301 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ecx;
302}
303
304
305VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
306{
307 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edx;
308}
309
310
311VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
312{
313 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esi;
314}
315
316
317VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
318{
319 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edi;
320}
321
322
323VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
324{
325 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebp;
326}
327
328
329VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
330{
331 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp;
332}
333
334
335VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
336{
337 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32;
338}
339
340
341VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
342{
343 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip;
344}
345
346
347VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
348{
349 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->rip;
350}
351
352
353VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
354{
355 if (pcbLimit)
356 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
357 return pVCpu->cpum.s.Hyper.idtr.pIdt;
358}
359
360
361VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
362{
363 if (pcbLimit)
364 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
365 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
366}
367
368
369VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
370{
371 return pVCpu->cpum.s.Hyper.ldtr;
372}
373
374
375VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
376{
377 return pVCpu->cpum.s.Hyper.dr[0];
378}
379
380
381VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
382{
383 return pVCpu->cpum.s.Hyper.dr[1];
384}
385
386
387VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
388{
389 return pVCpu->cpum.s.Hyper.dr[2];
390}
391
392
393VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
394{
395 return pVCpu->cpum.s.Hyper.dr[3];
396}
397
398
399VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
400{
401 return pVCpu->cpum.s.Hyper.dr[6];
402}
403
404
405VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
406{
407 return pVCpu->cpum.s.Hyper.dr[7];
408}
409
410
411/**
412 * Gets the pointer to the internal CPUMCTXCORE structure.
413 * This is only for reading in order to save a few calls.
414 *
415 * @param pVCpu Handle to the virtual cpu.
416 */
417VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
418{
419 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
420}
421
422
423/**
424 * Sets the guest context core registers.
425 *
426 * @param pVCpu Handle to the virtual cpu.
427 * @param pCtxCore The new context core values.
428 */
429VMMDECL(void) CPUMSetGuestCtxCore(PVMCPU pVCpu, PCCPUMCTXCORE pCtxCore)
430{
431 /** @todo #1410 requires selectors to be checked. (huh? 1410?) */
432
433 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
434 *pCtxCoreDst = *pCtxCore;
435
436 /* Mask away invalid parts of the cpu context. */
437 if (!CPUMIsGuestInLongMode(pVCpu))
438 {
439 uint64_t u64Mask = UINT64_C(0xffffffff);
440
441 pCtxCoreDst->rip &= u64Mask;
442 pCtxCoreDst->rax &= u64Mask;
443 pCtxCoreDst->rbx &= u64Mask;
444 pCtxCoreDst->rcx &= u64Mask;
445 pCtxCoreDst->rdx &= u64Mask;
446 pCtxCoreDst->rsi &= u64Mask;
447 pCtxCoreDst->rdi &= u64Mask;
448 pCtxCoreDst->rbp &= u64Mask;
449 pCtxCoreDst->rsp &= u64Mask;
450 pCtxCoreDst->rflags.u &= u64Mask;
451
452 pCtxCoreDst->r8 = 0;
453 pCtxCoreDst->r9 = 0;
454 pCtxCoreDst->r10 = 0;
455 pCtxCoreDst->r11 = 0;
456 pCtxCoreDst->r12 = 0;
457 pCtxCoreDst->r13 = 0;
458 pCtxCoreDst->r14 = 0;
459 pCtxCoreDst->r15 = 0;
460 }
461}
462
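/*
 * Illustrative usage sketch (not part of the original file): copy the guest
 * context core, tweak a register and write it back.  The masking of the
 * upper 32 bits and the clearing of r8-r15 for non-long-mode guests is done
 * by CPUMSetGuestCtxCore() itself.  pVCpu and cbInstr (the instruction
 * length) are assumed to come from the caller.
 *
 *     CPUMCTXCORE CtxCore = *CPUMGetGuestCtxCore(pVCpu);
 *     CtxCore.rip += cbInstr;
 *     CPUMSetGuestCtxCore(pVCpu, &CtxCore);
 */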
463
464/**
465 * Queries the pointer to the internal CPUMCTX structure
466 *
467 * @returns The CPUMCTX pointer.
468 * @param pVCpu Handle to the virtual cpu.
469 */
470VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
471{
472 return &pVCpu->cpum.s.Guest;
473}
474
475VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
476{
477 pVCpu->cpum.s.Guest.gdtr.cbGdt = limit;
478 pVCpu->cpum.s.Guest.gdtr.pGdt = addr;
479 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
480 return VINF_SUCCESS;
481}
482
483VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
484{
485 pVCpu->cpum.s.Guest.idtr.cbIdt = limit;
486 pVCpu->cpum.s.Guest.idtr.pIdt = addr;
487 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
488 return VINF_SUCCESS;
489}
490
491VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
492{
493 AssertMsgFailed(("Need to load the hidden bits too!\n"));
494
495 pVCpu->cpum.s.Guest.tr = tr;
496 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
497 return VINF_SUCCESS;
498}
499
500VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
501{
502 pVCpu->cpum.s.Guest.ldtr = ldtr;
503 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
504 return VINF_SUCCESS;
505}
506
507
508/**
509 * Set the guest CR0.
510 *
511 * When called in GC, the hyper CR0 may be updated if that is
512 * required. The caller only has to take special action if AM,
513 * WP, PG or PE changes.
514 *
515 * @returns VINF_SUCCESS (consider it void).
516 * @param pVCpu Handle to the virtual cpu.
517 * @param cr0 The new CR0 value.
518 */
519VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
520{
521#ifdef IN_RC
522 /*
523 * Check if we need to change hypervisor CR0 because
524 * of math stuff.
525 */
526 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
527 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
528 {
529 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
530 {
531 /*
532 * We haven't saved the host FPU state yet, so TS and MT are both set
533 * and EM should be reflecting the guest EM (it always does this).
534 */
535 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
536 {
537 uint32_t HyperCR0 = ASMGetCR0();
538 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
539 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
540 HyperCR0 &= ~X86_CR0_EM;
541 HyperCR0 |= cr0 & X86_CR0_EM;
542 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
543 ASMSetCR0(HyperCR0);
544 }
545# ifdef VBOX_STRICT
546 else
547 {
548 uint32_t HyperCR0 = ASMGetCR0();
549 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
550 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
551 }
552# endif
553 }
554 else
555 {
556 /*
557 * Already saved the state, so we're just mirroring
558 * the guest flags.
559 */
560 uint32_t HyperCR0 = ASMGetCR0();
561 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
562 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
563 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
564 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
565 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
566 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
567 ASMSetCR0(HyperCR0);
568 }
569 }
570#endif /* IN_RC */
571
572 /*
573 * Check for changes causing TLB flushes (for REM).
574 * The caller is responsible for calling PGM when appropriate.
575 */
576 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
577 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
578 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
579 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
580
581 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
582 return VINF_SUCCESS;
583}
584
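/*
 * Illustrative usage sketch (not part of the original file): the doc comment
 * above notes that the caller must take special action when PE, PG, WP or AM
 * change and that PGM has to be notified when appropriate.  A minimal
 * caller-side check could look like this; uNewCr0 is assumed to be the value
 * being loaded and the actual PGM notification is left abstract.
 *
 *     uint64_t const uOldCr0 = CPUMGetGuestCR0(pVCpu);
 *     CPUMSetGuestCR0(pVCpu, uNewCr0);
 *     if ((uOldCr0 ^ uNewCr0) & (X86_CR0_PE | X86_CR0_PG | X86_CR0_WP))
 *     {
 *         // re-evaluate the paging/protection mode with PGM here
 *     }
 */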
585
586VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
587{
588 pVCpu->cpum.s.Guest.cr2 = cr2;
589 return VINF_SUCCESS;
590}
591
592
593VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
594{
595 pVCpu->cpum.s.Guest.cr3 = cr3;
596 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
597 return VINF_SUCCESS;
598}
599
600
601VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
602{
603 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
604 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
605 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
606 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
607 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
608 cr4 &= ~X86_CR4_OSFSXR;
609 pVCpu->cpum.s.Guest.cr4 = cr4;
610 return VINF_SUCCESS;
611}
612
613
614VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
615{
616 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
617 return VINF_SUCCESS;
618}
619
620
621VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
622{
623 pVCpu->cpum.s.Guest.eip = eip;
624 return VINF_SUCCESS;
625}
626
627
628VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
629{
630 pVCpu->cpum.s.Guest.eax = eax;
631 return VINF_SUCCESS;
632}
633
634
635VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
636{
637 pVCpu->cpum.s.Guest.ebx = ebx;
638 return VINF_SUCCESS;
639}
640
641
642VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
643{
644 pVCpu->cpum.s.Guest.ecx = ecx;
645 return VINF_SUCCESS;
646}
647
648
649VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
650{
651 pVCpu->cpum.s.Guest.edx = edx;
652 return VINF_SUCCESS;
653}
654
655
656VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
657{
658 pVCpu->cpum.s.Guest.esp = esp;
659 return VINF_SUCCESS;
660}
661
662
663VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
664{
665 pVCpu->cpum.s.Guest.ebp = ebp;
666 return VINF_SUCCESS;
667}
668
669
670VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
671{
672 pVCpu->cpum.s.Guest.esi = esi;
673 return VINF_SUCCESS;
674}
675
676
677VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
678{
679 pVCpu->cpum.s.Guest.edi = edi;
680 return VINF_SUCCESS;
681}
682
683
684VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
685{
686 pVCpu->cpum.s.Guest.ss = ss;
687 return VINF_SUCCESS;
688}
689
690
691VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
692{
693 pVCpu->cpum.s.Guest.cs = cs;
694 return VINF_SUCCESS;
695}
696
697
698VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
699{
700 pVCpu->cpum.s.Guest.ds = ds;
701 return VINF_SUCCESS;
702}
703
704
705VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
706{
707 pVCpu->cpum.s.Guest.es = es;
708 return VINF_SUCCESS;
709}
710
711
712VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
713{
714 pVCpu->cpum.s.Guest.fs = fs;
715 return VINF_SUCCESS;
716}
717
718
719VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
720{
721 pVCpu->cpum.s.Guest.gs = gs;
722 return VINF_SUCCESS;
723}
724
725
726VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
727{
728 pVCpu->cpum.s.Guest.msrEFER = val;
729}
730
731
732VMMDECL(uint64_t) CPUMGetGuestMsr(PVMCPU pVCpu, unsigned idMsr)
733{
734 uint64_t u64 = 0;
735 uint8_t u8Multiplier = 4;
736
737 switch (idMsr)
738 {
739 case MSR_IA32_TSC:
740 u64 = TMCpuTickGet(pVCpu);
741 break;
742
743 case MSR_IA32_CR_PAT:
744 u64 = pVCpu->cpum.s.Guest.msrPAT;
745 break;
746
747 case MSR_IA32_SYSENTER_CS:
748 u64 = pVCpu->cpum.s.Guest.SysEnter.cs;
749 break;
750
751 case MSR_IA32_SYSENTER_EIP:
752 u64 = pVCpu->cpum.s.Guest.SysEnter.eip;
753 break;
754
755 case MSR_IA32_SYSENTER_ESP:
756 u64 = pVCpu->cpum.s.Guest.SysEnter.esp;
757 break;
758
759 case MSR_K6_EFER:
760 u64 = pVCpu->cpum.s.Guest.msrEFER;
761 break;
762
763 case MSR_K8_SF_MASK:
764 u64 = pVCpu->cpum.s.Guest.msrSFMASK;
765 break;
766
767 case MSR_K6_STAR:
768 u64 = pVCpu->cpum.s.Guest.msrSTAR;
769 break;
770
771 case MSR_K8_LSTAR:
772 u64 = pVCpu->cpum.s.Guest.msrLSTAR;
773 break;
774
775 case MSR_K8_CSTAR:
776 u64 = pVCpu->cpum.s.Guest.msrCSTAR;
777 break;
778
779 case MSR_K8_KERNEL_GS_BASE:
780 u64 = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
781 break;
782
783 case MSR_K8_TSC_AUX:
784 u64 = pVCpu->cpum.s.GuestMsr.msr.tscAux;
785 break;
786
787 case MSR_IA32_PERF_STATUS:
788 /** @todo This may not be exactly correct; maybe use the host's values. */
789 /* Keep consistent with helper_rdmsr() in REM */
790 u64 = (1000ULL /* TSC increment by tick */)
791 | ((uint64_t)u8Multiplier << 24 /* CPU multiplier (aka bus ratio) min */ )
792 | ((uint64_t)u8Multiplier << 40 /* CPU multiplier (aka bus ratio) max */ );
793 break;
794
795 case MSR_IA32_FSB_CLOCK_STS:
796 /*
797 * FSB clock status, encoded as:
798 * 0 - 266
799 * 1 - 133
800 * 2 - 200
801 * 3 - 166
802 * 5 - 100
803 */
804 u64 = (2 << 4);
805 break;
806
807 case MSR_IA32_PLATFORM_INFO:
808 u64 = ((u8Multiplier)<<8 /* Flex ratio max */)
809 | ((uint64_t)u8Multiplier << 40 /* Flex ratio min */ );
810 break;
811
812 case MSR_IA32_THERM_STATUS:
813 /* CPU temperature relative to TCC; to actually activate this, CPUID leaf 6 EAX[0] must be set. */
814 u64 = (1 << 31) /* validity bit */ |
815 (20 << 16) /* degrees till TCC */;
816 break;
817
818 case MSR_IA32_MISC_ENABLE:
819#if 0
820 /* Needs to be tested more before enabling. */
821 u64 = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
822#else
823 u64 = 0;
824#endif
825 break;
826
827 /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
828 default:
829 AssertFailed();
830 break;
831 }
832 return u64;
833}
834
835VMMDECL(void) CPUMSetGuestMsr(PVMCPU pVCpu, unsigned idMsr, uint64_t valMsr)
836{
837 /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
838 switch (idMsr)
839 {
840 case MSR_K8_TSC_AUX:
841 pVCpu->cpum.s.GuestMsr.msr.tscAux = valMsr;
842 break;
843
844 case MSR_IA32_MISC_ENABLE:
845 pVCpu->cpum.s.GuestMsr.msr.miscEnable = valMsr;
846 break;
847
848 default:
849 AssertFailed();
850 break;
851 }
852}
853
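/*
 * Illustrative usage sketch (not part of the original file): reading and
 * writing one of the few MSRs handled directly by CPUM; other MSRs go
 * through the emulation functions, as the comment in CPUMSetGuestMsr()
 * points out.  pVCpu is assumed to be supplied by the caller.
 *
 *     uint64_t uTsc    = CPUMGetGuestMsr(pVCpu, MSR_IA32_TSC);
 *     uint64_t uTscAux = CPUMGetGuestMsr(pVCpu, MSR_K8_TSC_AUX);
 *     CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, uTscAux);
 *     Log(("tsc=%RX64 tsc_aux=%RX64\n", uTsc, uTscAux));
 */
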
854VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
855{
856 if (pcbLimit)
857 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
858 return pVCpu->cpum.s.Guest.idtr.pIdt;
859}
860
861
862VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
863{
864 if (pHidden)
865 *pHidden = pVCpu->cpum.s.Guest.trHid;
866 return pVCpu->cpum.s.Guest.tr;
867}
868
869
870VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
871{
872 return pVCpu->cpum.s.Guest.cs;
873}
874
875
876VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
877{
878 return pVCpu->cpum.s.Guest.ds;
879}
880
881
882VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
883{
884 return pVCpu->cpum.s.Guest.es;
885}
886
887
888VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
889{
890 return pVCpu->cpum.s.Guest.fs;
891}
892
893
894VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
895{
896 return pVCpu->cpum.s.Guest.gs;
897}
898
899
900VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
901{
902 return pVCpu->cpum.s.Guest.ss;
903}
904
905
906VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
907{
908 return pVCpu->cpum.s.Guest.ldtr;
909}
910
911
912VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
913{
914 return pVCpu->cpum.s.Guest.cr0;
915}
916
917
918VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
919{
920 return pVCpu->cpum.s.Guest.cr2;
921}
922
923
924VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
925{
926 return pVCpu->cpum.s.Guest.cr3;
927}
928
929
930VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
931{
932 return pVCpu->cpum.s.Guest.cr4;
933}
934
935
936VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
937{
938 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
939}
940
941
942VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
943{
944 return pVCpu->cpum.s.Guest.eip;
945}
946
947
948VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
949{
950 return pVCpu->cpum.s.Guest.rip;
951}
952
953
954VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
955{
956 return pVCpu->cpum.s.Guest.eax;
957}
958
959
960VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
961{
962 return pVCpu->cpum.s.Guest.ebx;
963}
964
965
966VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
967{
968 return pVCpu->cpum.s.Guest.ecx;
969}
970
971
972VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
973{
974 return pVCpu->cpum.s.Guest.edx;
975}
976
977
978VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
979{
980 return pVCpu->cpum.s.Guest.esi;
981}
982
983
984VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
985{
986 return pVCpu->cpum.s.Guest.edi;
987}
988
989
990VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
991{
992 return pVCpu->cpum.s.Guest.esp;
993}
994
995
996VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
997{
998 return pVCpu->cpum.s.Guest.ebp;
999}
1000
1001
1002VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1003{
1004 return pVCpu->cpum.s.Guest.eflags.u32;
1005}
1006
1007
1008///@todo: crx should be an array
1009VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1010{
1011 switch (iReg)
1012 {
1013 case USE_REG_CR0:
1014 *pValue = pVCpu->cpum.s.Guest.cr0;
1015 break;
1016 case USE_REG_CR2:
1017 *pValue = pVCpu->cpum.s.Guest.cr2;
1018 break;
1019 case USE_REG_CR3:
1020 *pValue = pVCpu->cpum.s.Guest.cr3;
1021 break;
1022 case USE_REG_CR4:
1023 *pValue = pVCpu->cpum.s.Guest.cr4;
1024 break;
1025 default:
1026 return VERR_INVALID_PARAMETER;
1027 }
1028 return VINF_SUCCESS;
1029}
1030
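/*
 * Illustrative usage sketch (not part of the original file): reading a
 * control register by index with error checking, e.g. when emulating a
 * "mov reg, crN" instruction.  iCrReg and pVCpu are assumed to come from the
 * instruction decoder / caller.
 *
 *     uint64_t uValue;
 *     int rc = CPUMGetGuestCRx(pVCpu, iCrReg, &uValue);
 *     if (RT_FAILURE(rc))
 *         return rc;   // VERR_INVALID_PARAMETER for anything but CR0/2/3/4.
 */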
1031
1032VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1033{
1034 return pVCpu->cpum.s.Guest.dr[0];
1035}
1036
1037
1038VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1039{
1040 return pVCpu->cpum.s.Guest.dr[1];
1041}
1042
1043
1044VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1045{
1046 return pVCpu->cpum.s.Guest.dr[2];
1047}
1048
1049
1050VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1051{
1052 return pVCpu->cpum.s.Guest.dr[3];
1053}
1054
1055
1056VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1057{
1058 return pVCpu->cpum.s.Guest.dr[6];
1059}
1060
1061
1062VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1063{
1064 return pVCpu->cpum.s.Guest.dr[7];
1065}
1066
1067
1068VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1069{
1070 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1071 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1072 if (iReg == 4 || iReg == 5)
1073 iReg += 2;
1074 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1075 return VINF_SUCCESS;
1076}
1077
1078
1079VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1080{
1081 return pVCpu->cpum.s.Guest.msrEFER;
1082}
1083
1084
1085/**
1086 * Gets a CpuId leaf.
1087 *
1088 * @param pVCpu The VMCPU handle.
1089 * @param iLeaf The CPUID leaf to get.
1090 * @param pEax Where to store the EAX value.
1091 * @param pEbx Where to store the EBX value.
1092 * @param pEcx Where to store the ECX value. On input: the cache index for leaf 4.
1093 * @param pEdx Where to store the EDX value.
1094 */
1095VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1096{
1097 PVM pVM = pVCpu->CTX_SUFF(pVM);
1098
1099 PCCPUMCPUID pCpuId;
1100 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1101 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1102 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1103 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1104 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1105 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1106 else
1107 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1108
1109 uint32_t cCurrentCacheIndex = *pEcx;
1110
1111 *pEax = pCpuId->eax;
1112 *pEbx = pCpuId->ebx;
1113 *pEcx = pCpuId->ecx;
1114 *pEdx = pCpuId->edx;
1115
1116 if ( iLeaf == 1)
1117 {
1118 /* Bits 31-24: Initial APIC ID */
1119 Assert(pVCpu->idCpu <= 255);
1120 *pEbx |= (pVCpu->idCpu << 24);
1121 }
1122
1123 if ( iLeaf == 4
1124 && cCurrentCacheIndex < 3
1125 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1126 {
1127 uint32_t type, level, sharing, linesize,
1128 partitions, associativity, sets, cores;
1129
1130 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1131 partitions = 1;
1132 /* These are only here to keep the compiler quiet; they are always
1133 overwritten below. */
1134 sets = associativity = sharing = level = 1;
1135 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1136 switch (cCurrentCacheIndex)
1137 {
1138 case 0:
1139 type = 1;
1140 level = 1;
1141 sharing = 1;
1142 linesize = 64;
1143 associativity = 8;
1144 sets = 64;
1145 break;
1146 case 1:
1147 level = 1;
1148 type = 2;
1149 sharing = 1;
1150 linesize = 64;
1151 associativity = 8;
1152 sets = 64;
1153 break;
1154 default: /* shut up gcc.*/
1155 AssertFailed();
1156 case 2:
1157 level = 2;
1158 type = 3;
1159 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1160 linesize = 64;
1161 associativity = 24;
1162 sets = 4096;
1163 break;
1164 }
1165
1166 *pEax |= ((cores - 1) << 26) |
1167 ((sharing - 1) << 14) |
1168 (level << 5) |
1169 1;
1170 *pEbx = (linesize - 1) |
1171 ((partitions - 1) << 12) |
1172 ((associativity - 1) << 22); /* -1 encoding */
1173 *pEcx = sets - 1;
1174 }
1175
1176 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1177}
1178
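/*
 * Illustrative usage sketch (not part of the original file): querying CPUID
 * leaves.  Note that for leaf 4 the incoming ECX value is used as the cache
 * index (see above), so it must be initialised before the call.  pVCpu is
 * assumed to be supplied by the caller.
 *
 *     uint32_t uEAX, uEBX, uECX, uEDX;
 *     uECX = 0;  // cache index 0 for leaf 4
 *     CPUMGetGuestCpuId(pVCpu, 4, &uEAX, &uEBX, &uECX, &uEDX);
 *     uECX = 0;
 *     CPUMGetGuestCpuId(pVCpu, 1, &uEAX, &uEBX, &uECX, &uEDX);
 *     Log(("std leaf 1: eax=%RX32 edx=%RX32\n", uEAX, uEDX));
 */
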
1179/**
1180 * Gets the number of standard CPUID leafs.
1181 *
1182 * @returns Number of leafs.
1183 * @param pVM The VM handle.
1184 * @remark Intended for PATM.
1185 */
1186VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1187{
1188 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1189}
1190
1191
1192/**
1193 * Gets the number of extended CPUID leafs.
1194 *
1195 * @returns Number of leafs.
1196 * @param pVM The VM handle.
1197 * @remark Intended for PATM.
1198 */
1199VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1200{
1201 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1202}
1203
1204
1205/**
1206 * Gets the number of centaur CPUID leafs.
1207 *
1208 * @returns Number of leafs.
1209 * @param pVM The VM handle.
1210 * @remark Intended for PATM.
1211 */
1212VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1213{
1214 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1215}
1216
1217
1218/**
1219 * Sets a CPUID feature bit.
1220 *
1221 * @param pVM The VM Handle.
1222 * @param enmFeature The feature to set.
1223 */
1224VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1225{
1226 switch (enmFeature)
1227 {
1228 /*
1229 * Set the APIC bit in both feature masks.
1230 */
1231 case CPUMCPUIDFEATURE_APIC:
1232 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1233 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1234 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1235 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1236 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1237 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1238 break;
1239
1240 /*
1241 * Set the x2APIC bit in the standard feature mask.
1242 */
1243 case CPUMCPUIDFEATURE_X2APIC:
1244 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1245 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1246 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1247 break;
1248
1249 /*
1250 * Set the sysenter/sysexit bit in the standard feature mask.
1251 * Assumes the caller knows what it's doing! (host must support these)
1252 */
1253 case CPUMCPUIDFEATURE_SEP:
1254 {
1255 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1256 {
1257 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1258 return;
1259 }
1260
1261 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1262 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1263 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1264 break;
1265 }
1266
1267 /*
1268 * Set the syscall/sysret bit in the extended feature mask.
1269 * Assumes the caller knows what it's doing! (host must support these)
1270 */
1271 case CPUMCPUIDFEATURE_SYSCALL:
1272 {
1273 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1274 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1275 {
1276#if HC_ARCH_BITS == 32
1277 /* X86_CPUID_AMD_FEATURE_EDX_SEP does not appear to be set in 32-bit mode,
1278 * even when the CPU is capable of it in 64-bit mode.
1279 */
1280 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1281 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1282 || !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1283#endif
1284 {
1285 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1286 return;
1287 }
1288 }
1289 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1290 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1291 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1292 break;
1293 }
1294
1295 /*
1296 * Set the PAE bit in both feature masks.
1297 * Assumes the caller knows what it's doing! (host must support these)
1298 */
1299 case CPUMCPUIDFEATURE_PAE:
1300 {
1301 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1302 {
1303 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1304 return;
1305 }
1306
1307 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1308 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1309 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1310 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1311 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1312 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1313 break;
1314 }
1315
1316 /*
1317 * Set the LONG MODE bit in the extended feature mask.
1318 * Assumes the caller knows what it's doing! (host must support these)
1319 */
1320 case CPUMCPUIDFEATURE_LONG_MODE:
1321 {
1322 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1323 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1324 {
1325 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1326 return;
1327 }
1328
1329 /* Valid for both Intel and AMD. */
1330 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1331 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1332 break;
1333 }
1334
1335 /*
1336 * Set the NXE bit in the extended feature mask.
1337 * Assumes the caller knows what it's doing! (host must support these)
1338 */
1339 case CPUMCPUIDFEATURE_NXE:
1340 {
1341 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1342 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1343 {
1344 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1345 return;
1346 }
1347
1348 /* Valid for both Intel and AMD. */
1349 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1350 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1351 break;
1352 }
1353
1354 case CPUMCPUIDFEATURE_LAHF:
1355 {
1356 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1357 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1358 {
1359 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1360 return;
1361 }
1362
1363 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1364 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1365 break;
1366 }
1367
1368 case CPUMCPUIDFEATURE_PAT:
1369 {
1370 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1371 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1372 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1373 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1374 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1375 LogRel(("CPUMClearGuestCpuIdFeature: Enabled PAT\n"));
1376 break;
1377 }
1378
1379 case CPUMCPUIDFEATURE_RDTSCP:
1380 {
1381 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1382 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP))
1383 {
1384 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1385 return;
1386 }
1387
1388 /* Valid for AMD only (for now). */
1389 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
1390 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1391 break;
1392 }
1393
1394 default:
1395 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1396 break;
1397 }
1398 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1399 {
1400 PVMCPU pVCpu = &pVM->aCpus[i];
1401 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1402 }
1403}
1404
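/*
 * Illustrative usage sketch (not part of the original file): exposing a
 * CPUID feature to the guest and verifying it took effect; typically done
 * from ring-3 VM construction code.  The host-support checks are performed
 * by CPUMSetGuestCpuIdFeature() itself.  pVM is assumed to be the VM handle
 * available to the caller.
 *
 *     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
 *     Assert(CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE));
 */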
1405
1406/**
1407 * Queries a CPUID feature bit.
1408 *
1409 * @returns boolean for feature presence
1410 * @param pVM The VM Handle.
1411 * @param enmFeature The feature to query.
1412 */
1413VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1414{
1415 switch (enmFeature)
1416 {
1417 case CPUMCPUIDFEATURE_PAE:
1418 {
1419 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1420 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1421 break;
1422 }
1423
1424 case CPUMCPUIDFEATURE_RDTSCP:
1425 {
1426 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1427 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
1428 break;
1429 }
1430
1431 case CPUMCPUIDFEATURE_LONG_MODE:
1432 {
1433 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1434 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
1435 break;
1436 }
1437
1438 default:
1439 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1440 break;
1441 }
1442 return false;
1443}
1444
1445
1446/**
1447 * Clears a CPUID feature bit.
1448 *
1449 * @param pVM The VM Handle.
1450 * @param enmFeature The feature to clear.
1451 */
1452VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1453{
1454 switch (enmFeature)
1455 {
1456 /*
1457 * Set the APIC bit in both feature masks.
1458 */
1459 case CPUMCPUIDFEATURE_APIC:
1460 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1461 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1462 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1463 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1464 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1465 Log(("CPUMSetGuestCpuIdFeature: Disabled APIC\n"));
1466 break;
1467
1468 /*
1469 * Clear the x2APIC bit in the standard feature mask.
1470 */
1471 case CPUMCPUIDFEATURE_X2APIC:
1472 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1473 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1474 LogRel(("CPUMSetGuestCpuIdFeature: Disabled x2APIC\n"));
1475 break;
1476
1477 case CPUMCPUIDFEATURE_PAE:
1478 {
1479 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1480 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1481 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1482 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1483 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1484 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1485 break;
1486 }
1487
1488 case CPUMCPUIDFEATURE_PAT:
1489 {
1490 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1491 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1492 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1493 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1494 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1495 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1496 break;
1497 }
1498
1499 case CPUMCPUIDFEATURE_LONG_MODE:
1500 {
1501 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1502 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1503 break;
1504 }
1505
1506 case CPUMCPUIDFEATURE_LAHF:
1507 {
1508 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1509 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1510 break;
1511 }
1512
1513 default:
1514 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1515 break;
1516 }
1517 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1518 {
1519 PVMCPU pVCpu = &pVM->aCpus[i];
1520 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1521 }
1522}
1523
1524
1525/**
1526 * Gets the host CPU vendor
1527 *
1528 * @returns CPU vendor
1529 * @param pVM The VM handle.
1530 */
1531VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1532{
1533 return pVM->cpum.s.enmHostCpuVendor;
1534}
1535
1536/**
1537 * Gets the guest CPU vendor
1538 *
1539 * @returns CPU vendor
1540 * @param pVM The VM handle.
1541 */
1542VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1543{
1544 return pVM->cpum.s.enmGuestCpuVendor;
1545}
1546
1547
1548VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1549{
1550 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1551 return CPUMRecalcHyperDRx(pVCpu);
1552}
1553
1554
1555VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1556{
1557 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1558 return CPUMRecalcHyperDRx(pVCpu);
1559}
1560
1561
1562VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1563{
1564 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1565 return CPUMRecalcHyperDRx(pVCpu);
1566}
1567
1568
1569VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1570{
1571 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1572 return CPUMRecalcHyperDRx(pVCpu);
1573}
1574
1575
1576VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1577{
1578 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1579 return CPUMRecalcHyperDRx(pVCpu);
1580}
1581
1582
1583VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1584{
1585 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1586 return CPUMRecalcHyperDRx(pVCpu);
1587}
1588
1589
1590VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1591{
1592 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1593 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1594 if (iReg == 4 || iReg == 5)
1595 iReg += 2;
1596 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1597 return CPUMRecalcHyperDRx(pVCpu);
1598}
1599
1600
1601/**
1602 * Recalculates the hypervisor DRx register values based on
1603 * current guest registers and DBGF breakpoints.
1604 *
1605 * This is called whenever a guest DRx register is modified and when DBGF
1606 * sets a hardware breakpoint. In guest context this function will reload
1607 * any (hyper) DRx registers that come out with a different value.
1608 *
1609 * @returns VINF_SUCCESS.
1610 * @param pVCpu The VMCPU handle.
1611 */
1612VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
1613{
1614 PVM pVM = pVCpu->CTX_SUFF(pVM);
1615
1616 /*
1617 * Compare the DR7s first.
1618 *
1619 * We only care about the enabled flags. The GE and LE flags are always
1620 * set and we don't care if the guest doesn't set them. GD is virtualized
1621 * when we dispatch #DB; we never enable it.
1622 */
1623 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1624#ifdef CPUM_VIRTUALIZE_DRX
1625 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1626#else
1627 const RTGCUINTREG uGstDr7 = 0;
1628#endif
1629 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1630 {
1631 /*
1632 * Ok, something is enabled. Recalc each of the breakpoints.
1633 * Straightforward code, not optimized/minimized in any way.
1634 */
1635 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1636
1637 /* bp 0 */
1638 RTGCUINTREG uNewDr0;
1639 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1640 {
1641 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1642 uNewDr0 = DBGFBpGetDR0(pVM);
1643 }
1644 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1645 {
1646 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1647 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1648 }
1649 else
1650 uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
1651
1652 /* bp 1 */
1653 RTGCUINTREG uNewDr1;
1654 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1655 {
1656 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1657 uNewDr1 = DBGFBpGetDR1(pVM);
1658 }
1659 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1660 {
1661 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1662 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1663 }
1664 else
1665 uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
1666
1667 /* bp 2 */
1668 RTGCUINTREG uNewDr2;
1669 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1670 {
1671 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1672 uNewDr2 = DBGFBpGetDR2(pVM);
1673 }
1674 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1675 {
1676 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1677 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1678 }
1679 else
1680 uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
1681
1682 /* bp 3 */
1683 RTGCUINTREG uNewDr3;
1684 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1685 {
1686 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1687 uNewDr3 = DBGFBpGetDR3(pVM);
1688 }
1689 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1690 {
1691 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1692 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1693 }
1694 else
1695 uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
1696
1697 /*
1698 * Apply the updates.
1699 */
1700#ifdef IN_RC
1701 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
1702 {
1703 /** @todo save host DBx registers. */
1704 }
1705#endif
1706 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
1707 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1708 CPUMSetHyperDR3(pVCpu, uNewDr3);
1709 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1710 CPUMSetHyperDR2(pVCpu, uNewDr2);
1711 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1712 CPUMSetHyperDR1(pVCpu, uNewDr1);
1713 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1714 CPUMSetHyperDR0(pVCpu, uNewDr0);
1715 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1716 CPUMSetHyperDR7(pVCpu, uNewDr7);
1717 }
1718 else
1719 {
1720#ifdef IN_RC
1721 if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
1722 {
1723 /** @todo restore host DBx registers. */
1724 }
1725#endif
1726 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1727 }
1728 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1729 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1730 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1731 pVCpu->cpum.s.Hyper.dr[7]));
1732
1733 return VINF_SUCCESS;
1734}
1735
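/*
 * Illustrative usage sketch (not part of the original file): the
 * recalculation above runs implicitly whenever a guest DRx register is
 * changed through the CPUMSetGuestDRx functions, so a caller normally only
 * does something like the following; uNewGuestDr7 is an assumed input.
 *
 *     int rc = CPUMSetGuestDR7(pVCpu, uNewGuestDr7);  // triggers CPUMRecalcHyperDRx
 *     Log2(("hyper dr7=%RGr\n", CPUMGetHyperDR7(pVCpu)));
 */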
1736
1737/**
1738 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1739 *
1740 * @returns true if NXE is enabled, otherwise false.
1741 * @param pVCpu The virtual CPU handle.
1742 */
1743VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
1744{
1745 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1746}
1747
1748
1749/**
1750 * Tests if the guest has the Page Size Extension enabled (PSE).
1751 *
1752 * @returns true if PSE is enabled, otherwise false.
1753 * @param pVCpu The virtual CPU handle.
1754 */
1755VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
1756{
1757 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1758 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1759}
1760
1761
1762/**
1763 * Tests if the guest has paging enabled (PG).
1764 *
1765 * @returns true if paging is enabled, otherwise false.
1766 * @param pVCpu The virtual CPU handle.
1767 */
1768VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
1769{
1770 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1771}
1772
1773
1774/**
1775 * Tests if the guest has the paging enabled (PG).
1776 *
1777 * @returns true if in real mode, otherwise false.
1778 * @param pVCpu The virtual CPU handle.
1779 */
1780VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
1781{
1782 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1783}
1784
1785
1786/**
1787 * Tests if the guest is running in real mode or not.
1788 *
1789 * @returns true if in real mode, otherwise false.
1790 * @param pVCpu The virtual CPU handle.
1791 */
1792VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
1793{
1794 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1795}
1796
1797
1798/**
1799 * Tests if the guest is running in protected mode or not.
1800 *
1801 * @returns true if in protected mode, otherwise false.
1802 * @param pVCpu The virtual CPU handle.
1803 */
1804VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
1805{
1806 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1807}
1808
1809
1810/**
1811 * Tests if the guest is running in paged protected mode or not.
1812 *
1813 * @returns true if in paged protected mode, otherwise false.
1814 * @param pVCpu The virtual CPU handle.
1815 */
1816VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
1817{
1818 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1819}
1820
1821
1822/**
1823 * Tests if the guest is running in long mode or not.
1824 *
1825 * @returns true if in long mode, otherwise false.
1826 * @param pVCpu The virtual CPU handle.
1827 */
1828VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
1829{
1830 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1831}
1832
1833
1834/**
1835 * Tests if the guest is running in PAE mode or not.
1836 *
1837 * @returns true if in PAE mode, otherwise false.
1838 * @param pVCpu The virtual CPU handle.
1839 */
1840VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
1841{
1842 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1843 && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
1844 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1845}
1846
1847
1848
1849#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
1850
1851/**
1852 * Transforms the guest CPU state to raw-ring mode.
1853 *
1854 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
1855 *
1856 * @returns VBox status. (recompiler failure)
1857 * @param pVCpu The VMCPU handle.
1858 * @param pCtxCore The context core (for trap usage).
1859 * @see @ref pg_raw
1860 */
1861VMMDECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
1862{
1863 PVM pVM = pVCpu->CTX_SUFF(pVM);
1864
1865 Assert(!pVM->cpum.s.fRawEntered);
1866 if (!pCtxCore)
1867 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
1868
1869 /*
1870 * Are we in Ring-0?
1871 */
1872 if ( pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
1873 && !pCtxCore->eflags.Bits.u1VM)
1874 {
1875 /*
1876 * Enter execution mode.
1877 */
1878 PATMRawEnter(pVM, pCtxCore);
1879
1880 /*
1881 * Set CPL to Ring-1.
1882 */
1883 pCtxCore->ss |= 1;
1884 if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
1885 pCtxCore->cs |= 1;
1886 }
1887 else
1888 {
1889 AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
1890 ("ring-1 code not supported\n"));
1891 /*
1892 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
1893 */
1894 PATMRawEnter(pVM, pCtxCore);
1895 }
1896
1897 /*
1898 * Assert sanity.
1899 */
1900 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
1901 AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
1902 || pCtxCore->eflags.Bits.u1VM,
1903 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1904 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
1905 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
1906
1907 pVM->cpum.s.fRawEntered = true;
1908 return VINF_SUCCESS;
1909}
1910
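/*
 * Illustrative usage sketch (not part of the original file): raw-mode
 * execution brackets the guest run with CPUMRawEnter()/CPUMRawLeave(), in
 * the spirit of what the execution loop does.  Passing NULL makes both
 * functions use the guest context core, as documented above.
 *
 *     int rc = CPUMRawEnter(pVCpu, NULL);
 *     // ... execute guest code in raw (ring-1) mode ...
 *     rc = CPUMRawLeave(pVCpu, NULL, rc);
 */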
1911
1912/**
1913 * Transforms the guest CPU state from raw-ring mode to correct values.
1914 *
1915 * This function will change any selector registers with DPL=1 to DPL=0.
1916 *
1917 * @returns Adjusted rc.
1918 * @param pVCpu The VMCPU handle.
1919 * @param rc Raw mode return code
1920 * @param pCtxCore The context core (for trap usage).
1921 * @see @ref pg_raw
1922 */
1923VMMDECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
1924{
1925 PVM pVM = pVCpu->CTX_SUFF(pVM);
1926
1927 /*
1928 * Don't leave if we've already left (in GC).
1929 */
1930 Assert(pVM->cpum.s.fRawEntered);
1931 if (!pVM->cpum.s.fRawEntered)
1932 return rc;
1933 pVM->cpum.s.fRawEntered = false;
1934
1935 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1936 if (!pCtxCore)
1937 pCtxCore = CPUMCTX2CORE(pCtx);
1938 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
1939 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
1940 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1941
1942 /*
1943 * Are we executing in raw ring-1?
1944 */
1945 if ( (pCtxCore->ss & X86_SEL_RPL) == 1
1946 && !pCtxCore->eflags.Bits.u1VM)
1947 {
1948 /*
1949 * Leave execution mode.
1950 */
1951 PATMRawLeave(pVM, pCtxCore, rc);
1952 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
1953 /** @todo See what happens if we remove this. */
1954 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1955 pCtxCore->ds &= ~X86_SEL_RPL;
1956 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1957 pCtxCore->es &= ~X86_SEL_RPL;
1958 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1959 pCtxCore->fs &= ~X86_SEL_RPL;
1960 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1961 pCtxCore->gs &= ~X86_SEL_RPL;
1962
1963 /*
1964 * Ring-1 selector => Ring-0.
1965 */
1966 pCtxCore->ss &= ~X86_SEL_RPL;
1967 if ((pCtxCore->cs & X86_SEL_RPL) == 1)
1968 pCtxCore->cs &= ~X86_SEL_RPL;
1969 }
1970 else
1971 {
1972 /*
1973 * PATM is taking care of the IOPL and IF flags for us.
1974 */
1975 PATMRawLeave(pVM, pCtxCore, rc);
1976 if (!pCtxCore->eflags.Bits.u1VM)
1977 {
1978 /** @todo See what happens if we remove this. */
1979 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1980 pCtxCore->ds &= ~X86_SEL_RPL;
1981 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1982 pCtxCore->es &= ~X86_SEL_RPL;
1983 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1984 pCtxCore->fs &= ~X86_SEL_RPL;
1985 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1986 pCtxCore->gs &= ~X86_SEL_RPL;
1987 }
1988 }
1989
1990 return rc;
1991}
1992
1993/**
1994 * Updates the EFLAGS while we're in raw-mode.
1995 *
1996 * @param pVCpu The VMCPU handle.
1997 * @param pCtxCore The context core.
1998 * @param eflags The new EFLAGS value.
1999 */
2000VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t eflags)
2001{
2002 PVM pVM = pVCpu->CTX_SUFF(pVM);
2003
2004 if (!pVM->cpum.s.fRawEntered)
2005 {
2006 pCtxCore->eflags.u32 = eflags;
2007 return;
2008 }
2009 PATMRawSetEFlags(pVM, pCtxCore, eflags);
2010}
2011
2012#endif /* !IN_RING0 */
2013
2014/**
2015 * Gets the EFLAGS while we're in raw-mode.
2016 *
2017 * @returns The eflags.
2018 * @param pVCpu The VMCPU handle.
2019 * @param pCtxCore The context core.
2020 */
2021VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2022{
2023#ifdef IN_RING0
2024 return pCtxCore->eflags.u32;
2025#else
2026 PVM pVM = pVCpu->CTX_SUFF(pVM);
2027
2028 if (!pVM->cpum.s.fRawEntered)
2029 return pCtxCore->eflags.u32;
2030 return PATMRawGetEFlags(pVM, pCtxCore);
2031#endif
2032}
2033
2034
2035/**
2036 * Gets and resets the changed flags (CPUM_CHANGED_*).
2037 * Only REM should call this function.
2038 *
2039 * @returns The changed flags.
2040 * @param pVCpu The VMCPU handle.
2041 */
2042VMMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVMCPU pVCpu)
2043{
2044 unsigned fFlags = pVCpu->cpum.s.fChanged;
2045 pVCpu->cpum.s.fChanged = 0;
2046 /** @todo change the switcher to use the fChanged flags. */
2047 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
2048 {
2049 fFlags |= CPUM_CHANGED_FPU_REM;
2050 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
2051 }
2052 return fFlags;
2053}
2054
2055
2056/**
2057 * Sets the specified changed flags (CPUM_CHANGED_*).
2058 *
2059 * @param pVCpu The VMCPU handle.
2060 */
2061VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2062{
2063 pVCpu->cpum.s.fChanged |= fChangedFlags;
2064}
2065
2066
2067/**
2068 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
2069 * @returns true if supported.
2070 * @returns false if not supported.
2071 * @param pVM The VM handle.
2072 */
2073VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2074{
2075 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2076}
2077
2078
2079/**
2080 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2081 * @returns true if used.
2082 * @returns false if not used.
2083 * @param pVM The VM handle.
2084 */
2085VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2086{
2087 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
2088}
2089
2090
2091/**
2092 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2093 * @returns true if used.
2094 * @returns false if not used.
2095 * @param pVM The VM handle.
2096 */
2097VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2098{
2099 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
2100}
2101
2102#ifndef IN_RING3
2103
2104/**
2105 * Lazily sync in the FPU/XMM state
2106 *
2107 * @returns VBox status code.
2108 * @param pVCpu VMCPU handle
2109 */
2110VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2111{
2112 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2113}
2114
2115#endif /* !IN_RING3 */
2116
2117/**
2118 * Checks if we activated the FPU/XMM state of the guest OS
2119 * @returns true if we did.
2120 * @returns false if not.
2121 * @param pVCpu The VMCPU handle.
2122 */
2123VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2124{
2125 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2126}
2127
2128
2129/**
2130 * Deactivate the FPU/XMM state of the guest OS
2131 * @param pVCpu The VMCPU handle.
2132 */
2133VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2134{
2135 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2136}
2137
2138
2139/**
2140 * Checks if the guest debug state is active
2141 *
2142 * @returns boolean
2143 * @param pVCpu The VMCPU handle.
2144 */
2145VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2146{
2147 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2148}
2149
2150/**
2151 * Checks if the hyper debug state is active
2152 *
2153 * @returns boolean
2154 * @param pVCpu The VMCPU handle.
2155 */
2156VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2157{
2158 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
2159}
2160
2161
2162/**
2163 * Mark the guest's debug state as inactive
2164 *
2165 * @param pVCpu The VMCPU handle.
2167 */
2168VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2169{
2170 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2171}
2172
2173
2174/**
2175 * Mark the hypervisor's debug state as inactive
2176 *
2177 * @param pVCpu The VMCPU handle.
2179 */
2180VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
2181{
2182 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2183}
2184
2185/**
2186 * Checks if the hidden selector registers are valid
2187 * @returns true if they are.
2188 * @returns false if not.
2189 * @param pVM The VM handle.
2190 */
2191VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
2192{
2193 return HWACCMIsEnabled(pVM);
2194}
2195
2196
2197
2198/**
2199 * Get the current privilege level of the guest.
2200 *
2201 * @returns cpl
2202 * @param pVCpu The VMCPU handle.
2203 * @param pCtxCore The trap register frame (context core).
2204 */
2205VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2206{
2207 uint32_t cpl;
2208
2209 if (CPUMAreHiddenSelRegsValid(pVCpu->CTX_SUFF(pVM)))
2210 {
2211 /*
2212 * The hidden CS.DPL register is always equal to the CPL, it is
2213 * not affected by loading a conforming coding segment.
2214 *
2215 * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
2216 * at SS. (ACP2 regression during install after a far call to ring 2)
2217 */
2218 if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2219 cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
2220 else
2221 cpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2222 }
2223 else if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2224 {
2225 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
2226 {
2227 /*
2228 * The SS RPL is always equal to the CPL, while the CS RPL
2229 * isn't necessarily equal if the segment is conforming.
2230 * See section 4.11.1 in the AMD manual.
2231 */
2232 cpl = (pCtxCore->ss & X86_SEL_RPL);
2233#ifndef IN_RING0
2234 if (cpl == 1)
2235 cpl = 0;
2236#endif
2237 }
2238 else
2239 cpl = 3;
2240 }
2241 else
2242 cpl = 0; /* real mode; cpl is zero */
2243
2244 return cpl;
2245}
2246
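/*
 * Illustrative usage sketch (not part of the original file): a privilege
 * check in a trap handler, where pCtxCore is assumed to be the trap register
 * frame handed to the handler.
 *
 *     if (CPUMGetGuestCPL(pVCpu, pCtxCore) != 0)
 *         return VERR_ACCESS_DENIED;   // or inject #GP(0) into the guest
 */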
2247
2248/**
2249 * Gets the current guest CPU mode.
2250 *
2251 * If paging mode is what you need, check out PGMGetGuestMode().
2252 *
2253 * @returns The CPU mode.
2254 * @param pVCpu The VMCPU handle.
2255 */
2256VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2257{
2258 CPUMMODE enmMode;
2259 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2260 enmMode = CPUMMODE_REAL;
2261 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2262 enmMode = CPUMMODE_PROTECTED;
2263 else
2264 enmMode = CPUMMODE_LONG;
2265
2266 return enmMode;
2267}
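/*
 * Illustrative usage sketch (not part of the original file): dispatching on
 * the current guest CPU mode.  pVCpu is assumed to be supplied by the caller.
 *
 *     switch (CPUMGetGuestMode(pVCpu))
 *     {
 *         case CPUMMODE_REAL:      Log(("real mode\n")); break;
 *         case CPUMMODE_PROTECTED: Log(("protected mode\n")); break;
 *         case CPUMMODE_LONG:      Log(("long mode\n")); break;
 *         default: AssertFailed(); break;
 *     }
 */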