VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 29206

Last change on this file since 29206 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

1/* $Id: CPUMAllRegs.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/cpum.h>
24#include <VBox/patm.h>
25#include <VBox/dbgf.h>
26#include <VBox/mm.h>
27#include "CPUMInternal.h"
28#include <VBox/vm.h>
29#include <VBox/err.h>
30#include <VBox/dis.h>
31#include <VBox/log.h>
32#include <VBox/hwaccm.h>
33#include <VBox/tm.h>
34#include <iprt/assert.h>
35#include <iprt/asm.h>
36#ifdef IN_RING3
37#include <iprt/thread.h>
38#endif
39
40/** Disable stack frame pointer generation here. */
41#if defined(_MSC_VER) && !defined(DEBUG)
42# pragma optimize("y", off)
43#endif
44
45
46/**
47 * Sets or resets an alternative hypervisor context core.
48 *
49 * This is called when we get a hypervisor trap, to switch the context
50 * core to the trap frame on the stack. It is called again to reset
51 * back to the default context core when resuming hypervisor execution.
52 *
53 * @param pVCpu The VMCPU handle.
54 * @param pCtxCore Pointer to the alternative context core or NULL
55 * to go back to the default context core.
56 */
57VMMDECL(void) CPUMHyperSetCtxCore(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
58{
59 PVM pVM = pVCpu->CTX_SUFF(pVM);
60
61 LogFlow(("CPUMHyperSetCtxCore: %p/%p/%p -> %p\n", pVCpu->cpum.s.pHyperCoreR3, pVCpu->cpum.s.pHyperCoreR0, pVCpu->cpum.s.pHyperCoreRC, pCtxCore));
62 if (!pCtxCore)
63 {
64 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
65 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
66 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
67 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))VM_RC_ADDR(pVM, pCtxCore);
68 }
69 else
70 {
71 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
72 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
73 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
74 }
75}
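/* Illustrative sketch (not part of the original file): a hypervisor trap
 * handler would typically switch to the trap frame's context core and
 * restore the default one when resuming; the TrapFrame name below is an
 * assumption used for illustration only:
 *
 *     CPUMHyperSetCtxCore(pVCpu, &TrapFrame.CtxCore);  // use the trap frame
 *     // ... handle the trap ...
 *     CPUMHyperSetCtxCore(pVCpu, NULL);                // back to the default core
 */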
76
77
78/**
79 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
80 * This is only for reading in order to save a few calls.
81 *
82 * @param pVCpu The VMCPU handle.
83 */
84VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
85{
86 return pVCpu->cpum.s.CTX_SUFF(pHyperCore);
87}
88
89
90/**
91 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
92 *
93 * @returns VBox status code.
94 * @param pVCpu The VMCPU handle.
95 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
96 *
97 * @deprecated This will *not* (and never has) give the right picture of the
98 * hypervisor register state. With CPUMHyperSetCtxCore() this is
99 * getting much worse. So, use the individual functions for getting
100 * and esp. setting the hypervisor registers.
101 */
102VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx)
103{
104 *ppCtx = &pVCpu->cpum.s.Hyper;
105 return VINF_SUCCESS;
106}
107
108
109VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
110{
111 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
112 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
113 pVCpu->cpum.s.Hyper.gdtrPadding = 0;
114}
115
116
117VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
118{
119 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
120 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
121 pVCpu->cpum.s.Hyper.idtrPadding = 0;
122}
123
124
125VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
126{
127 pVCpu->cpum.s.Hyper.cr3 = cr3;
128
129#ifdef IN_RC
130 /* Update the current CR3. */
131 ASMSetCR3(cr3);
132#endif
133}
134
135VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
136{
137 return pVCpu->cpum.s.Hyper.cr3;
138}
139
140
141VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
142{
143 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs = SelCS;
144}
145
146
147VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
148{
149 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds = SelDS;
150}
151
152
153VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
154{
155 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es = SelES;
156}
157
158
159VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
160{
161 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs = SelFS;
162}
163
164
165VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
166{
167 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs = SelGS;
168}
169
170
171VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
172{
173 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss = SelSS;
174}
175
176
177VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
178{
179 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp = u32ESP;
180}
181
182
183VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
184{
185 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32 = Efl;
186 return VINF_SUCCESS;
187}
188
189
190VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
191{
192 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip = u32EIP;
193}
194
195
196VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
197{
198 pVCpu->cpum.s.Hyper.tr = SelTR;
199}
200
201
202VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
203{
204 pVCpu->cpum.s.Hyper.ldtr = SelLDTR;
205}
206
207
208VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
209{
210 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
211 /** @todo in GC we must load it! */
212}
213
214
215VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
216{
217 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
218 /** @todo in GC we must load it! */
219}
220
221
222VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
223{
224 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
225 /** @todo in GC we must load it! */
226}
227
228
229VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
230{
231 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
232 /** @todo in GC we must load it! */
233}
234
235
236VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
237{
238 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
239 /** @todo in GC we must load it! */
240}
241
242
243VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
244{
245 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
246 /** @todo in GC we must load it! */
247}
248
249
250VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
251{
252 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs;
253}
254
255
256VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
257{
258 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds;
259}
260
261
262VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
263{
264 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es;
265}
266
267
268VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
269{
270 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs;
271}
272
273
274VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
275{
276 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs;
277}
278
279
280VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
281{
282 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss;
283}
284
285
286VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
287{
288 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eax;
289}
290
291
292VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
293{
294 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebx;
295}
296
297
298VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
299{
300 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ecx;
301}
302
303
304VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
305{
306 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edx;
307}
308
309
310VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
311{
312 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esi;
313}
314
315
316VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
317{
318 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edi;
319}
320
321
322VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
323{
324 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebp;
325}
326
327
328VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
329{
330 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp;
331}
332
333
334VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
335{
336 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32;
337}
338
339
340VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
341{
342 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip;
343}
344
345
346VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
347{
348 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->rip;
349}
350
351
352VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
353{
354 if (pcbLimit)
355 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
356 return pVCpu->cpum.s.Hyper.idtr.pIdt;
357}
358
359
360VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
361{
362 if (pcbLimit)
363 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
364 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
365}
366
367
368VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
369{
370 return pVCpu->cpum.s.Hyper.ldtr;
371}
372
373
374VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
375{
376 return pVCpu->cpum.s.Hyper.dr[0];
377}
378
379
380VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
381{
382 return pVCpu->cpum.s.Hyper.dr[1];
383}
384
385
386VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
387{
388 return pVCpu->cpum.s.Hyper.dr[2];
389}
390
391
392VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
393{
394 return pVCpu->cpum.s.Hyper.dr[3];
395}
396
397
398VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
399{
400 return pVCpu->cpum.s.Hyper.dr[6];
401}
402
403
404VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
405{
406 return pVCpu->cpum.s.Hyper.dr[7];
407}
408
409
410/**
411 * Gets the pointer to the internal CPUMCTXCORE structure.
412 * This is only for reading in order to save a few calls.
413 *
414 * @param pVCpu Handle to the virtual cpu.
415 */
416VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
417{
418 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
419}
420
421
422/**
423 * Sets the guest context core registers.
424 *
425 * @param pVCpu Handle to the virtual cpu.
426 * @param pCtxCore The new context core values.
427 */
428VMMDECL(void) CPUMSetGuestCtxCore(PVMCPU pVCpu, PCCPUMCTXCORE pCtxCore)
429{
430 /** @todo #1410 requires selectors to be checked. (huh? 1410?) */
431
432 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
433 *pCtxCoreDst = *pCtxCore;
434
435 /* Mask away invalid parts of the cpu context. */
436 if (!CPUMIsGuestInLongMode(pVCpu))
437 {
438 uint64_t u64Mask = UINT64_C(0xffffffff);
439
440 pCtxCoreDst->rip &= u64Mask;
441 pCtxCoreDst->rax &= u64Mask;
442 pCtxCoreDst->rbx &= u64Mask;
443 pCtxCoreDst->rcx &= u64Mask;
444 pCtxCoreDst->rdx &= u64Mask;
445 pCtxCoreDst->rsi &= u64Mask;
446 pCtxCoreDst->rdi &= u64Mask;
447 pCtxCoreDst->rbp &= u64Mask;
448 pCtxCoreDst->rsp &= u64Mask;
449 pCtxCoreDst->rflags.u &= u64Mask;
450
451 pCtxCoreDst->r8 = 0;
452 pCtxCoreDst->r9 = 0;
453 pCtxCoreDst->r10 = 0;
454 pCtxCoreDst->r11 = 0;
455 pCtxCoreDst->r12 = 0;
456 pCtxCoreDst->r13 = 0;
457 pCtxCoreDst->r14 = 0;
458 pCtxCoreDst->r15 = 0;
459 }
460}
461
462
463/**
464 * Queries the pointer to the internal CPUMCTX structure
465 *
466 * @returns The CPUMCTX pointer.
467 * @param pVCpu Handle to the virtual cpu.
468 */
469VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
470{
471 return &pVCpu->cpum.s.Guest;
472}
473
474VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
475{
476 pVCpu->cpum.s.Guest.gdtr.cbGdt = limit;
477 pVCpu->cpum.s.Guest.gdtr.pGdt = addr;
478 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
479 return VINF_SUCCESS;
480}
481
482VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
483{
484 pVCpu->cpum.s.Guest.idtr.cbIdt = limit;
485 pVCpu->cpum.s.Guest.idtr.pIdt = addr;
486 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
487 return VINF_SUCCESS;
488}
489
490VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
491{
492 AssertMsgFailed(("Need to load the hidden bits too!\n"));
493
494 pVCpu->cpum.s.Guest.tr = tr;
495 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
496 return VINF_SUCCESS;
497}
498
499VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
500{
501 pVCpu->cpum.s.Guest.ldtr = ldtr;
502 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
503 return VINF_SUCCESS;
504}
505
506
507/**
508 * Set the guest CR0.
509 *
510 * When called in GC, the hyper CR0 may be updated if that is
511 * required. The caller only has to take special action if AM,
512 * WP, PG or PE changes.
513 *
514 * @returns VINF_SUCCESS (consider it void).
515 * @param pVCpu Handle to the virtual cpu.
516 * @param cr0 The new CR0 value.
517 */
518VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
519{
520#ifdef IN_RC
521 /*
522 * Check if we need to change hypervisor CR0 because
523 * of math stuff.
524 */
525 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
526 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
527 {
528 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
529 {
530 /*
531 * We haven't saved the host FPU state yet, so TS and MT are both set
532 * and EM should be reflecting the guest EM (it always does this).
533 */
534 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
535 {
536 uint32_t HyperCR0 = ASMGetCR0();
537 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
538 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
539 HyperCR0 &= ~X86_CR0_EM;
540 HyperCR0 |= cr0 & X86_CR0_EM;
541 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
542 ASMSetCR0(HyperCR0);
543 }
544# ifdef VBOX_STRICT
545 else
546 {
547 uint32_t HyperCR0 = ASMGetCR0();
548 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
549 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
550 }
551# endif
552 }
553 else
554 {
555 /*
556 * Already saved the state, so we're just mirroring
557 * the guest flags.
558 */
559 uint32_t HyperCR0 = ASMGetCR0();
560 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
561 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
562 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
563 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
564 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
565 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
566 ASMSetCR0(HyperCR0);
567 }
568 }
569#endif /* IN_RC */
570
571 /*
572 * Check for changes causing TLB flushes (for REM).
573 * The caller is responsible for calling PGM when appropriate.
574 */
575 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
576 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
577 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
578 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
579
580 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
581 return VINF_SUCCESS;
582}
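/* Hedged usage sketch (an assumption, not taken from this file): since this
 * function only records the change, a caller that toggles PG/WP/PE would
 * typically follow up with PGM itself; the PGMChangeMode() call and its
 * signature below are assumptions for illustration:
 *
 *     uint64_t uOldCr0 = CPUMGetGuestCR0(pVCpu);
 *     CPUMSetGuestCR0(pVCpu, uNewCr0);
 *     if ((uOldCr0 ^ uNewCr0) & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
 *         rc = PGMChangeMode(pVCpu, uNewCr0, CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
 */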
583
584
585VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
586{
587 pVCpu->cpum.s.Guest.cr2 = cr2;
588 return VINF_SUCCESS;
589}
590
591
592VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
593{
594 pVCpu->cpum.s.Guest.cr3 = cr3;
595 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
596 return VINF_SUCCESS;
597}
598
599
600VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
601{
602 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
603 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
604 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
605 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
606 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
607 cr4 &= ~X86_CR4_OSFSXR;
608 pVCpu->cpum.s.Guest.cr4 = cr4;
609 return VINF_SUCCESS;
610}
611
612
613VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
614{
615 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
616 return VINF_SUCCESS;
617}
618
619
620VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
621{
622 pVCpu->cpum.s.Guest.eip = eip;
623 return VINF_SUCCESS;
624}
625
626
627VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
628{
629 pVCpu->cpum.s.Guest.eax = eax;
630 return VINF_SUCCESS;
631}
632
633
634VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
635{
636 pVCpu->cpum.s.Guest.ebx = ebx;
637 return VINF_SUCCESS;
638}
639
640
641VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
642{
643 pVCpu->cpum.s.Guest.ecx = ecx;
644 return VINF_SUCCESS;
645}
646
647
648VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
649{
650 pVCpu->cpum.s.Guest.edx = edx;
651 return VINF_SUCCESS;
652}
653
654
655VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
656{
657 pVCpu->cpum.s.Guest.esp = esp;
658 return VINF_SUCCESS;
659}
660
661
662VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
663{
664 pVCpu->cpum.s.Guest.ebp = ebp;
665 return VINF_SUCCESS;
666}
667
668
669VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
670{
671 pVCpu->cpum.s.Guest.esi = esi;
672 return VINF_SUCCESS;
673}
674
675
676VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
677{
678 pVCpu->cpum.s.Guest.edi = edi;
679 return VINF_SUCCESS;
680}
681
682
683VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
684{
685 pVCpu->cpum.s.Guest.ss = ss;
686 return VINF_SUCCESS;
687}
688
689
690VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
691{
692 pVCpu->cpum.s.Guest.cs = cs;
693 return VINF_SUCCESS;
694}
695
696
697VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
698{
699 pVCpu->cpum.s.Guest.ds = ds;
700 return VINF_SUCCESS;
701}
702
703
704VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
705{
706 pVCpu->cpum.s.Guest.es = es;
707 return VINF_SUCCESS;
708}
709
710
711VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
712{
713 pVCpu->cpum.s.Guest.fs = fs;
714 return VINF_SUCCESS;
715}
716
717
718VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
719{
720 pVCpu->cpum.s.Guest.gs = gs;
721 return VINF_SUCCESS;
722}
723
724
725VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
726{
727 pVCpu->cpum.s.Guest.msrEFER = val;
728}
729
730
731VMMDECL(uint64_t) CPUMGetGuestMsr(PVMCPU pVCpu, unsigned idMsr)
732{
733 uint64_t u64 = 0;
734 uint8_t u8Multiplier = 4;
735
736 switch (idMsr)
737 {
738 case MSR_IA32_TSC:
739 u64 = TMCpuTickGet(pVCpu);
740 break;
741
742 case MSR_IA32_CR_PAT:
743 u64 = pVCpu->cpum.s.Guest.msrPAT;
744 break;
745
746 case MSR_IA32_SYSENTER_CS:
747 u64 = pVCpu->cpum.s.Guest.SysEnter.cs;
748 break;
749
750 case MSR_IA32_SYSENTER_EIP:
751 u64 = pVCpu->cpum.s.Guest.SysEnter.eip;
752 break;
753
754 case MSR_IA32_SYSENTER_ESP:
755 u64 = pVCpu->cpum.s.Guest.SysEnter.esp;
756 break;
757
758 case MSR_K6_EFER:
759 u64 = pVCpu->cpum.s.Guest.msrEFER;
760 break;
761
762 case MSR_K8_SF_MASK:
763 u64 = pVCpu->cpum.s.Guest.msrSFMASK;
764 break;
765
766 case MSR_K6_STAR:
767 u64 = pVCpu->cpum.s.Guest.msrSTAR;
768 break;
769
770 case MSR_K8_LSTAR:
771 u64 = pVCpu->cpum.s.Guest.msrLSTAR;
772 break;
773
774 case MSR_K8_CSTAR:
775 u64 = pVCpu->cpum.s.Guest.msrCSTAR;
776 break;
777
778 case MSR_K8_KERNEL_GS_BASE:
779 u64 = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
780 break;
781
782 case MSR_K8_TSC_AUX:
783 u64 = pVCpu->cpum.s.GuestMsr.msr.tscAux;
784 break;
785
786 case MSR_IA32_PERF_STATUS:
787 /** @todo This may not be exactly correct; maybe use the host's values. */
788 /* Keep consistent with helper_rdmsr() in REM */
789 u64 = (1000ULL /* TSC increment by tick */)
790 | ((uint64_t)u8Multiplier << 24 /* CPU multiplier (aka bus ratio) min */ )
791 | ((uint64_t)u8Multiplier << 40 /* CPU multiplier (aka bus ratio) max */ );
792 break;
793
794 case MSR_IA32_FSB_CLOCK_STS:
795 /**
796 * Encoded as:
797 * 0 - 266
798 * 1 - 133
799 * 2 - 200
800 * 3 - return 166
801 * 5 - return 100
802 */
803 u64 = (2 << 4);
804 break;
805
806 case MSR_IA32_PLATFORM_INFO:
807 u64 = ((u8Multiplier)<<8 /* Flex ratio max */)
808 | ((uint64_t)u8Multiplier << 40 /* Flex ratio min */ );
809 break;
810
811 case MSR_IA32_THERM_STATUS:
812 /* CPU temperature relative to TCC; to actually activate this, CPUID leaf 6 EAX[0] must be set. */
813 u64 = (1 << 31) /* validity bit */ |
814 (20 << 16) /* degrees till TCC */;
815 break;
816
817 case MSR_IA32_MISC_ENABLE:
818#if 0
819 /* Needs to be tested more before enabling. */
820 u64 = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
821#else
822 u64 = 0;
823#endif
824 break;
825
826 /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
827 default:
828 AssertFailed();
829 break;
830 }
831 return u64;
832}
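/* Worked example derived from the code above: with u8Multiplier = 4, a guest
 * RDMSR of MSR_IA32_PERF_STATUS returns
 *     1000 | (4ULL << 24) | (4ULL << 40) = 0x400040003E8
 * i.e. a fixed TSC increment with both the minimum and maximum bus ratio
 * reported as 4.
 */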
833
834VMMDECL(void) CPUMSetGuestMsr(PVMCPU pVCpu, unsigned idMsr, uint64_t valMsr)
835{
836 /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
837 switch (idMsr)
838 {
839 case MSR_K8_TSC_AUX:
840 pVCpu->cpum.s.GuestMsr.msr.tscAux = valMsr;
841 break;
842
843 case MSR_IA32_MISC_ENABLE:
844 pVCpu->cpum.s.GuestMsr.msr.miscEnable = valMsr;
845 break;
846
847 default:
848 AssertFailed();
849 break;
850 }
851}
852
853VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
854{
855 if (pcbLimit)
856 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
857 return pVCpu->cpum.s.Guest.idtr.pIdt;
858}
859
860
861VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
862{
863 if (pHidden)
864 *pHidden = pVCpu->cpum.s.Guest.trHid;
865 return pVCpu->cpum.s.Guest.tr;
866}
867
868
869VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
870{
871 return pVCpu->cpum.s.Guest.cs;
872}
873
874
875VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
876{
877 return pVCpu->cpum.s.Guest.ds;
878}
879
880
881VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
882{
883 return pVCpu->cpum.s.Guest.es;
884}
885
886
887VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
888{
889 return pVCpu->cpum.s.Guest.fs;
890}
891
892
893VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
894{
895 return pVCpu->cpum.s.Guest.gs;
896}
897
898
899VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
900{
901 return pVCpu->cpum.s.Guest.ss;
902}
903
904
905VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
906{
907 return pVCpu->cpum.s.Guest.ldtr;
908}
909
910
911VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
912{
913 return pVCpu->cpum.s.Guest.cr0;
914}
915
916
917VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
918{
919 return pVCpu->cpum.s.Guest.cr2;
920}
921
922
923VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
924{
925 return pVCpu->cpum.s.Guest.cr3;
926}
927
928
929VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
930{
931 return pVCpu->cpum.s.Guest.cr4;
932}
933
934
935VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
936{
937 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
938}
939
940
941VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
942{
943 return pVCpu->cpum.s.Guest.eip;
944}
945
946
947VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
948{
949 return pVCpu->cpum.s.Guest.rip;
950}
951
952
953VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
954{
955 return pVCpu->cpum.s.Guest.eax;
956}
957
958
959VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
960{
961 return pVCpu->cpum.s.Guest.ebx;
962}
963
964
965VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
966{
967 return pVCpu->cpum.s.Guest.ecx;
968}
969
970
971VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
972{
973 return pVCpu->cpum.s.Guest.edx;
974}
975
976
977VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
978{
979 return pVCpu->cpum.s.Guest.esi;
980}
981
982
983VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
984{
985 return pVCpu->cpum.s.Guest.edi;
986}
987
988
989VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
990{
991 return pVCpu->cpum.s.Guest.esp;
992}
993
994
995VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
996{
997 return pVCpu->cpum.s.Guest.ebp;
998}
999
1000
1001VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1002{
1003 return pVCpu->cpum.s.Guest.eflags.u32;
1004}
1005
1006
1007///@todo: crx should be an array
1008VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1009{
1010 switch (iReg)
1011 {
1012 case USE_REG_CR0:
1013 *pValue = pVCpu->cpum.s.Guest.cr0;
1014 break;
1015 case USE_REG_CR2:
1016 *pValue = pVCpu->cpum.s.Guest.cr2;
1017 break;
1018 case USE_REG_CR3:
1019 *pValue = pVCpu->cpum.s.Guest.cr3;
1020 break;
1021 case USE_REG_CR4:
1022 *pValue = pVCpu->cpum.s.Guest.cr4;
1023 break;
1024 default:
1025 return VERR_INVALID_PARAMETER;
1026 }
1027 return VINF_SUCCESS;
1028}
1029
1030
1031VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1032{
1033 return pVCpu->cpum.s.Guest.dr[0];
1034}
1035
1036
1037VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1038{
1039 return pVCpu->cpum.s.Guest.dr[1];
1040}
1041
1042
1043VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1044{
1045 return pVCpu->cpum.s.Guest.dr[2];
1046}
1047
1048
1049VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1050{
1051 return pVCpu->cpum.s.Guest.dr[3];
1052}
1053
1054
1055VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1056{
1057 return pVCpu->cpum.s.Guest.dr[6];
1058}
1059
1060
1061VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1062{
1063 return pVCpu->cpum.s.Guest.dr[7];
1064}
1065
1066
1067VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1068{
1069 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1070 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1071 if (iReg == 4 || iReg == 5)
1072 iReg += 2;
1073 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1074 return VINF_SUCCESS;
1075}
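/* Example of the DR4/DR5 aliasing handled above: asking for DR5 yields DR7.
 *     uint64_t uValue;
 *     CPUMGetGuestDRx(pVCpu, 5, &uValue);   // uValue == pVCpu->cpum.s.Guest.dr[7]
 */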
1076
1077
1078VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1079{
1080 return pVCpu->cpum.s.Guest.msrEFER;
1081}
1082
1083
1084/**
1085 * Gets a CpuId leaf.
1086 *
1087 * @param pVCpu The VMCPU handle.
1088 * @param iLeaf The CPUID leaf to get.
1089 * @param pEax Where to store the EAX value.
1090 * @param pEbx Where to store the EBX value.
1091 * @param pEcx Where to store the ECX value.
1092 * @param pEdx Where to store the EDX value.
1093 */
1094VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1095{
1096 PVM pVM = pVCpu->CTX_SUFF(pVM);
1097
1098 PCCPUMCPUID pCpuId;
1099 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1100 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1101 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1102 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1103 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1104 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1105 else
1106 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1107
1108 uint32_t cCurrentCacheIndex = *pEcx;
1109
1110 *pEax = pCpuId->eax;
1111 *pEbx = pCpuId->ebx;
1112 *pEcx = pCpuId->ecx;
1113 *pEdx = pCpuId->edx;
1114
1115 if ( iLeaf == 1)
1116 {
1117 /* Bits 31-24: Initial APIC ID */
1118 Assert(pVCpu->idCpu <= 255);
1119 *pEbx |= (pVCpu->idCpu << 24);
1120 }
1121
1122 if ( iLeaf == 4
1123 && cCurrentCacheIndex < 3
1124 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1125 {
1126 uint32_t type, level, sharing, linesize,
1127 partitions, associativity, sets, cores;
1128
1129 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1130 partitions = 1;
1131 /* These are only to quiet the compiler, as they will always
1132 get overwritten, and the compiler should be able to figure that out. */
1133 sets = associativity = sharing = level = 1;
1134 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1135 switch (cCurrentCacheIndex)
1136 {
1137 case 0:
1138 type = 1;
1139 level = 1;
1140 sharing = 1;
1141 linesize = 64;
1142 associativity = 8;
1143 sets = 64;
1144 break;
1145 case 1:
1146 level = 1;
1147 type = 2;
1148 sharing = 1;
1149 linesize = 64;
1150 associativity = 8;
1151 sets = 64;
1152 break;
1153 default: /* shut up gcc.*/
1154 AssertFailed();
1155 case 2:
1156 level = 2;
1157 type = 3;
1158 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1159 linesize = 64;
1160 associativity = 24;
1161 sets = 4096;
1162 break;
1163 }
1164
1165 *pEax |= ((cores - 1) << 26) |
1166 ((sharing - 1) << 14) |
1167 (level << 5) |
1168 1;
1169 *pEbx = (linesize - 1) |
1170 ((partitions - 1) << 12) |
1171 ((associativity - 1) << 22); /* -1 encoding */
1172 *pEcx = sets - 1;
1173 }
1174
1175 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1176}
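/* Worked example of the leaf 4 encoding above, for a single-VCPU Intel guest
 * querying cache index 2 (the unified L2 model: level 2, 64-byte lines,
 * 24-way, 4096 sets, shared by 1 core):
 *     EAX = ((1-1) << 26) | ((1-1) << 14) | (2 << 5) | 1    = 0x00000041
 *     EBX = (64-1) | ((1-1) << 12) | ((24-1) << 22)         = 0x05C0003F
 *     ECX = 4096 - 1                                        = 0x00000FFF
 * which a guest decodes back to 24 * 1 * 64 * 4096 = 6 MB of L2 cache.
 */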
1177
1178/**
1179 * Gets the number of standard CPUID leafs.
1180 *
1181 * @returns Number of leafs.
1182 * @param pVM The VM handle.
1183 * @remark Intended for PATM.
1184 */
1185VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1186{
1187 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1188}
1189
1190
1191/**
1192 * Gets the number of extended CPUID leafs.
1193 *
1194 * @returns Number of leafs.
1195 * @param pVM The VM handle.
1196 * @remark Intended for PATM.
1197 */
1198VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1199{
1200 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1201}
1202
1203
1204/**
1205 * Gets the number of centaur CPUID leafs.
1206 *
1207 * @returns Number of leafs.
1208 * @param pVM The VM handle.
1209 * @remark Intended for PATM.
1210 */
1211VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1212{
1213 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1214}
1215
1216
1217/**
1218 * Sets a CPUID feature bit.
1219 *
1220 * @param pVM The VM Handle.
1221 * @param enmFeature The feature to set.
1222 */
1223VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1224{
1225 switch (enmFeature)
1226 {
1227 /*
1228 * Set the APIC bit in both feature masks.
1229 */
1230 case CPUMCPUIDFEATURE_APIC:
1231 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1232 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1233 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1234 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1235 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1236 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1237 break;
1238
1239 /*
1240 * Set the x2APIC bit in the standard feature mask.
1241 */
1242 case CPUMCPUIDFEATURE_X2APIC:
1243 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1244 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1245 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1246 break;
1247
1248 /*
1249 * Set the sysenter/sysexit bit in the standard feature mask.
1250 * Assumes the caller knows what it's doing! (host must support these)
1251 */
1252 case CPUMCPUIDFEATURE_SEP:
1253 {
1254 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1255 {
1256 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1257 return;
1258 }
1259
1260 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1261 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1262 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1263 break;
1264 }
1265
1266 /*
1267 * Set the syscall/sysret bit in the extended feature mask.
1268 * Assumes the caller knows what it's doing! (host must support these)
1269 */
1270 case CPUMCPUIDFEATURE_SYSCALL:
1271 {
1272 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1273 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1274 {
1275#if HC_ARCH_BITS == 32
1276 /* X86_CPUID_AMD_FEATURE_EDX_SEP does not seem to be set in 32-bit mode,
1277 * even when the CPU is capable of doing so in 64-bit mode.
1278 */
1279 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1280 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1281 || !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1282#endif
1283 {
1284 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1285 return;
1286 }
1287 }
1288 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1289 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1290 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1291 break;
1292 }
1293
1294 /*
1295 * Set the PAE bit in both feature masks.
1296 * Assumes the caller knows what it's doing! (host must support these)
1297 */
1298 case CPUMCPUIDFEATURE_PAE:
1299 {
1300 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1301 {
1302 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1303 return;
1304 }
1305
1306 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1307 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1308 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1309 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1310 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1311 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1312 break;
1313 }
1314
1315 /*
1316 * Set the LONG MODE bit in the extended feature mask.
1317 * Assumes the caller knows what it's doing! (host must support these)
1318 */
1319 case CPUMCPUIDFEATURE_LONG_MODE:
1320 {
1321 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1322 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1323 {
1324 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1325 return;
1326 }
1327
1328 /* Valid for both Intel and AMD. */
1329 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1330 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1331 break;
1332 }
1333
1334 /*
1335 * Set the NXE bit in the extended feature mask.
1336 * Assumes the caller knows what it's doing! (host must support these)
1337 */
1338 case CPUMCPUIDFEATURE_NXE:
1339 {
1340 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1341 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1342 {
1343 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1344 return;
1345 }
1346
1347 /* Valid for both Intel and AMD. */
1348 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1349 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1350 break;
1351 }
1352
1353 case CPUMCPUIDFEATURE_LAHF:
1354 {
1355 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1356 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1357 {
1358 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1359 return;
1360 }
1361
1362 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1363 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1364 break;
1365 }
1366
1367 case CPUMCPUIDFEATURE_PAT:
1368 {
1369 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1370 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1371 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1372 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1373 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1374 LogRel(("CPUMClearGuestCpuIdFeature: Enabled PAT\n"));
1375 break;
1376 }
1377
1378 case CPUMCPUIDFEATURE_RDTSCP:
1379 {
1380 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1381 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP))
1382 {
1383 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1384 return;
1385 }
1386
1387 /* Valid for AMD only (for now). */
1388 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
1389 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1390 break;
1391 }
1392
1393 default:
1394 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1395 break;
1396 }
1397 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1398 {
1399 PVMCPU pVCpu = &pVM->aCpus[i];
1400 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1401 }
1402}
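/* Minimal usage sketch: exposing a feature to the guest is a single call,
 * e.g. CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
 * For host-dependent features the function refuses (with a release-log
 * warning or assertion) when the host lacks support; on success it marks
 * CPUM_CHANGED_CPUID on every VCPU.
 */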
1403
1404
1405/**
1406 * Queries a CPUID feature bit.
1407 *
1408 * @returns boolean for feature presence
1409 * @param pVM The VM Handle.
1410 * @param enmFeature The feature to query.
1411 */
1412VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1413{
1414 switch (enmFeature)
1415 {
1416 case CPUMCPUIDFEATURE_PAE:
1417 {
1418 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1419 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1420 break;
1421 }
1422
1423 case CPUMCPUIDFEATURE_RDTSCP:
1424 {
1425 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1426 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
1427 break;
1428 }
1429
1430 case CPUMCPUIDFEATURE_LONG_MODE:
1431 {
1432 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1433 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
1434 break;
1435 }
1436
1437 default:
1438 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1439 break;
1440 }
1441 return false;
1442}
1443
1444
1445/**
1446 * Clears a CPUID feature bit.
1447 *
1448 * @param pVM The VM Handle.
1449 * @param enmFeature The feature to clear.
1450 */
1451VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1452{
1453 switch (enmFeature)
1454 {
1455 /*
1456 * Set the APIC bit in both feature masks.
1457 */
1458 case CPUMCPUIDFEATURE_APIC:
1459 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1460 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1461 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1462 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1463 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1464 Log(("CPUMSetGuestCpuIdFeature: Disabled APIC\n"));
1465 break;
1466
1467 /*
1468 * Clear the x2APIC bit in the standard feature mask.
1469 */
1470 case CPUMCPUIDFEATURE_X2APIC:
1471 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1472 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1473 LogRel(("CPUMSetGuestCpuIdFeature: Disabled x2APIC\n"));
1474 break;
1475
1476 case CPUMCPUIDFEATURE_PAE:
1477 {
1478 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1479 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1480 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1481 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1482 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1483 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1484 break;
1485 }
1486
1487 case CPUMCPUIDFEATURE_PAT:
1488 {
1489 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1490 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1491 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1492 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1493 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1494 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1495 break;
1496 }
1497
1498 case CPUMCPUIDFEATURE_LONG_MODE:
1499 {
1500 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1501 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1502 break;
1503 }
1504
1505 case CPUMCPUIDFEATURE_LAHF:
1506 {
1507 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1508 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1509 break;
1510 }
1511
1512 default:
1513 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1514 break;
1515 }
1516 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1517 {
1518 PVMCPU pVCpu = &pVM->aCpus[i];
1519 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1520 }
1521}
1522
1523
1524/**
1525 * Gets the host CPU vendor
1526 *
1527 * @returns CPU vendor
1528 * @param pVM The VM handle.
1529 */
1530VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1531{
1532 return pVM->cpum.s.enmHostCpuVendor;
1533}
1534
1535/**
1536 * Gets the guest CPU vendor
1537 *
1538 * @returns CPU vendor
1539 * @param pVM The VM handle.
1540 */
1541VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1542{
1543 return pVM->cpum.s.enmGuestCpuVendor;
1544}
1545
1546
1547VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1548{
1549 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1550 return CPUMRecalcHyperDRx(pVCpu);
1551}
1552
1553
1554VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1555{
1556 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1557 return CPUMRecalcHyperDRx(pVCpu);
1558}
1559
1560
1561VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1562{
1563 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1564 return CPUMRecalcHyperDRx(pVCpu);
1565}
1566
1567
1568VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1569{
1570 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1571 return CPUMRecalcHyperDRx(pVCpu);
1572}
1573
1574
1575VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1576{
1577 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1578 return CPUMRecalcHyperDRx(pVCpu);
1579}
1580
1581
1582VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1583{
1584 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1585 return CPUMRecalcHyperDRx(pVCpu);
1586}
1587
1588
1589VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1590{
1591 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1592 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1593 if (iReg == 4 || iReg == 5)
1594 iReg += 2;
1595 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1596 return CPUMRecalcHyperDRx(pVCpu);
1597}
1598
1599
1600/**
1601 * Recalculates the hypervisor DRx register values based on
1602 * current guest registers and DBGF breakpoints.
1603 *
1604 * This is called whenever a guest DRx register is modified and when DBGF
1605 * sets a hardware breakpoint. In guest context this function will reload
1606 * any (hyper) DRx registers which come out with a different value.
1607 *
1608 * @returns VINF_SUCCESS.
1609 * @param pVCpu The VMCPU handle.
1610 */
1611VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
1612{
1613 PVM pVM = pVCpu->CTX_SUFF(pVM);
1614
1615 /*
1616 * Compare the DR7s first.
1617 *
1618 * We only care about the enabled flags. The GE and LE flags are always
1619 * set and we don't care if the guest doesn't set them. GD is virtualized
1620 * when we dispatch #DB; we never enable it.
1621 */
1622 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1623#ifdef CPUM_VIRTUALIZE_DRX
1624 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1625#else
1626 const RTGCUINTREG uGstDr7 = 0;
1627#endif
1628 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1629 {
1630 /*
1631 * Ok, something is enabled. Recalc each of the breakpoints.
1632 * Straightforward code, not optimized/minimized in any way.
1633 */
1634 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1635
1636 /* bp 0 */
1637 RTGCUINTREG uNewDr0;
1638 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1639 {
1640 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1641 uNewDr0 = DBGFBpGetDR0(pVM);
1642 }
1643 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1644 {
1645 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1646 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1647 }
1648 else
1649 uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
1650
1651 /* bp 1 */
1652 RTGCUINTREG uNewDr1;
1653 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1654 {
1655 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1656 uNewDr1 = DBGFBpGetDR1(pVM);
1657 }
1658 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1659 {
1660 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1661 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1662 }
1663 else
1664 uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
1665
1666 /* bp 2 */
1667 RTGCUINTREG uNewDr2;
1668 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1669 {
1670 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1671 uNewDr2 = DBGFBpGetDR2(pVM);
1672 }
1673 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1674 {
1675 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1676 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1677 }
1678 else
1679 uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
1680
1681 /* bp 3 */
1682 RTGCUINTREG uNewDr3;
1683 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1684 {
1685 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1686 uNewDr3 = DBGFBpGetDR3(pVM);
1687 }
1688 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1689 {
1690 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1691 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1692 }
1693 else
1694 uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
1695
1696 /*
1697 * Apply the updates.
1698 */
1699#ifdef IN_RC
1700 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
1701 {
1702 /** @todo save host DBx registers. */
1703 }
1704#endif
1705 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
1706 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1707 CPUMSetHyperDR3(pVCpu, uNewDr3);
1708 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1709 CPUMSetHyperDR2(pVCpu, uNewDr2);
1710 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1711 CPUMSetHyperDR1(pVCpu, uNewDr1);
1712 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1713 CPUMSetHyperDR0(pVCpu, uNewDr0);
1714 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1715 CPUMSetHyperDR7(pVCpu, uNewDr7);
1716 }
1717 else
1718 {
1719#ifdef IN_RC
1720 if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
1721 {
1722 /** @todo restore host DBx registers. */
1723 }
1724#endif
1725 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1726 }
1727 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1728 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1729 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1730 pVCpu->cpum.s.Hyper.dr[7]));
1731
1732 return VINF_SUCCESS;
1733}
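/* Example of the merge above: if DBGF has armed hardware breakpoint 0, the
 * hyper DR0 and the L0/G0/RW0/LEN0 bits of DR7 are taken from DBGF and any
 * guest use of breakpoint 0 is ignored; a breakpoint only falls back to the
 * guest values when DBGF leaves it disabled (and then only if
 * CPUM_VIRTUALIZE_DRX is defined, otherwise the guest DR7 is treated as 0).
 */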
1734
1735
1736/**
1737 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1738 *
1739 * @returns true if NXE is enabled, otherwise false.
1740 * @param pVCpu The virtual CPU handle.
1741 */
1742VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
1743{
1744 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1745}
1746
1747
1748/**
1749 * Tests if the guest has the Page Size Extension enabled (PSE).
1750 *
1751 * @returns true if page size extensions are enabled, otherwise false.
1752 * @param pVCpu The virtual CPU handle.
1753 */
1754VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
1755{
1756 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1757 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1758}
1759
1760
1761/**
1762 * Tests if the guest has paging enabled (PG).
1763 *
1764 * @returns true if paging is enabled, otherwise false.
1765 * @param pVCpu The virtual CPU handle.
1766 */
1767VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
1768{
1769 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1770}
1771
1772
1773/**
1774 * Tests if the guest has ring-0 write protection enabled (WP).
1775 *
1776 * @returns true if write protection is enabled, otherwise false.
1777 * @param pVCpu The virtual CPU handle.
1778 */
1779VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
1780{
1781 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1782}
1783
1784
1785/**
1786 * Tests if the guest is running in real mode or not.
1787 *
1788 * @returns true if in real mode, otherwise false.
1789 * @param pVCpu The virtual CPU handle.
1790 */
1791VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
1792{
1793 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1794}
1795
1796
1797/**
1798 * Tests if the guest is running in protected mode or not.
1799 *
1800 * @returns true if in protected mode, otherwise false.
1801 * @param pVCpu The virtual CPU handle.
1802 */
1803VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
1804{
1805 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1806}
1807
1808
1809/**
1810 * Tests if the guest is running in paged protected mode or not.
1811 *
1812 * @returns true if in paged protected mode, otherwise false.
1813 * @param pVCpu The virtual CPU handle.
1814 */
1815VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
1816{
1817 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1818}
1819
1820
1821/**
1822 * Tests if the guest is running in long mode or not.
1823 *
1824 * @returns true if in long mode, otherwise false.
1825 * @param pVCpu The virtual CPU handle.
1826 */
1827VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
1828{
1829 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1830}
1831
1832
1833/**
1834 * Tests if the guest is running in PAE mode or not.
1835 *
1836 * @returns true if in PAE mode, otherwise false.
1837 * @param pVCpu The virtual CPU handle.
1838 */
1839VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
1840{
1841 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1842 && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
1843 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1844}
1845
1846
1847
1848#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
1849
1850/**
1851 * Transforms the guest CPU state to raw-ring mode.
1852 *
1853 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
1854 *
1855 * @returns VBox status. (recompiler failure)
1856 * @param pVCpu The VMCPU handle.
1857 * @param pCtxCore The context core (for trap usage).
1858 * @see @ref pg_raw
1859 */
1860VMMDECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
1861{
1862 PVM pVM = pVCpu->CTX_SUFF(pVM);
1863
1864 Assert(!pVM->cpum.s.fRawEntered);
1865 if (!pCtxCore)
1866 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
1867
1868 /*
1869 * Are we in Ring-0?
1870 */
1871 if ( pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
1872 && !pCtxCore->eflags.Bits.u1VM)
1873 {
1874 /*
1875 * Enter execution mode.
1876 */
1877 PATMRawEnter(pVM, pCtxCore);
1878
1879 /*
1880 * Set CPL to Ring-1.
1881 */
1882 pCtxCore->ss |= 1;
1883 if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
1884 pCtxCore->cs |= 1;
1885 }
1886 else
1887 {
1888 AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
1889 ("ring-1 code not supported\n"));
1890 /*
1891 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
1892 */
1893 PATMRawEnter(pVM, pCtxCore);
1894 }
1895
1896 /*
1897 * Assert sanity.
1898 */
1899 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
1900 AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
1901 || pCtxCore->eflags.Bits.u1VM,
1902 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1903 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
1904 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
1905
1906 pVM->cpum.s.fRawEntered = true;
1907 return VINF_SUCCESS;
1908}
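/* Illustrative pairing (a sketch, not from this file): raw-mode execution
 * brackets each guest run with these two calls, roughly:
 *
 *     rc = CPUMRawEnter(pVCpu, NULL);       // compress ring-0 to ring-1
 *     // ... execute guest code in raw mode ...
 *     rc = CPUMRawLeave(pVCpu, NULL, rc);   // undo the ring compression
 */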
1909
1910
1911/**
1912 * Transforms the guest CPU state from raw-ring mode to correct values.
1913 *
1914 * This function will change any selector registers with DPL=1 to DPL=0.
1915 *
1916 * @returns Adjusted rc.
1917 * @param pVCpu The VMCPU handle.
1918 * @param rc Raw mode return code
1919 * @param pCtxCore The context core (for trap usage).
1920 * @see @ref pg_raw
1921 */
1922VMMDECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
1923{
1924 PVM pVM = pVCpu->CTX_SUFF(pVM);
1925
1926 /*
1927 * Don't leave if we've already left (in GC).
1928 */
1929 Assert(pVM->cpum.s.fRawEntered);
1930 if (!pVM->cpum.s.fRawEntered)
1931 return rc;
1932 pVM->cpum.s.fRawEntered = false;
1933
1934 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1935 if (!pCtxCore)
1936 pCtxCore = CPUMCTX2CORE(pCtx);
1937 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
1938 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
1939 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1940
1941 /*
1942 * Are we executing in raw ring-1?
1943 */
1944 if ( (pCtxCore->ss & X86_SEL_RPL) == 1
1945 && !pCtxCore->eflags.Bits.u1VM)
1946 {
1947 /*
1948 * Leave execution mode.
1949 */
1950 PATMRawLeave(pVM, pCtxCore, rc);
1951 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
1952 /** @todo See what happens if we remove this. */
1953 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1954 pCtxCore->ds &= ~X86_SEL_RPL;
1955 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1956 pCtxCore->es &= ~X86_SEL_RPL;
1957 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1958 pCtxCore->fs &= ~X86_SEL_RPL;
1959 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1960 pCtxCore->gs &= ~X86_SEL_RPL;
1961
1962 /*
1963 * Ring-1 selector => Ring-0.
1964 */
1965 pCtxCore->ss &= ~X86_SEL_RPL;
1966 if ((pCtxCore->cs & X86_SEL_RPL) == 1)
1967 pCtxCore->cs &= ~X86_SEL_RPL;
1968 }
1969 else
1970 {
1971 /*
1972 * PATM is taking care of the IOPL and IF flags for us.
1973 */
1974 PATMRawLeave(pVM, pCtxCore, rc);
1975 if (!pCtxCore->eflags.Bits.u1VM)
1976 {
1977 /** @todo See what happens if we remove this. */
1978 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1979 pCtxCore->ds &= ~X86_SEL_RPL;
1980 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1981 pCtxCore->es &= ~X86_SEL_RPL;
1982 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1983 pCtxCore->fs &= ~X86_SEL_RPL;
1984 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1985 pCtxCore->gs &= ~X86_SEL_RPL;
1986 }
1987 }
1988
1989 return rc;
1990}
1991
1992/**
1993 * Updates the EFLAGS while we're in raw-mode.
1994 *
1995 * @param pVCpu The VMCPU handle.
1996 * @param pCtxCore The context core.
1997 * @param eflags The new EFLAGS value.
1998 */
1999VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t eflags)
2000{
2001 PVM pVM = pVCpu->CTX_SUFF(pVM);
2002
2003 if (!pVM->cpum.s.fRawEntered)
2004 {
2005 pCtxCore->eflags.u32 = eflags;
2006 return;
2007 }
2008 PATMRawSetEFlags(pVM, pCtxCore, eflags);
2009}
2010
2011#endif /* !IN_RING0 */
2012
2013/**
2014 * Gets the EFLAGS while we're in raw-mode.
2015 *
2016 * @returns The eflags.
2017 * @param pVCpu The VMCPU handle.
2018 * @param pCtxCore The context core.
2019 */
2020VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2021{
2022#ifdef IN_RING0
2023 return pCtxCore->eflags.u32;
2024#else
2025 PVM pVM = pVCpu->CTX_SUFF(pVM);
2026
2027 if (!pVM->cpum.s.fRawEntered)
2028 return pCtxCore->eflags.u32;
2029 return PATMRawGetEFlags(pVM, pCtxCore);
2030#endif
2031}
2032
2033
2034/**
2035 * Gets and resets the changed flags (CPUM_CHANGED_*).
2036 * Only REM should call this function.
2037 *
2038 * @returns The changed flags.
2039 * @param pVCpu The VMCPU handle.
2040 */
2041VMMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVMCPU pVCpu)
2042{
2043 unsigned fFlags = pVCpu->cpum.s.fChanged;
2044 pVCpu->cpum.s.fChanged = 0;
2045 /** @todo change the switcher to use the fChanged flags. */
2046 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
2047 {
2048 fFlags |= CPUM_CHANGED_FPU_REM;
2049 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
2050 }
2051 return fFlags;
2052}
2053
2054
2055/**
2056 * Sets the specified changed flags (CPUM_CHANGED_*).
2057 *
2058 * @param pVCpu The VMCPU handle.
2059 */
2060VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2061{
2062 pVCpu->cpum.s.fChanged |= fChangedFlags;
2063}
2064
2065
2066/**
2067 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
2068 * @returns true if supported.
2069 * @returns false if not supported.
2070 * @param pVM The VM handle.
2071 */
2072VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2073{
2074 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2075}
2076
2077
2078/**
2079 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2080 * @returns true if used.
2081 * @returns false if not used.
2082 * @param pVM The VM handle.
2083 */
2084VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2085{
2086 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
2087}
2088
2089
2090/**
2091 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2092 * @returns true if used.
2093 * @returns false if not used.
2094 * @param pVM The VM handle.
2095 */
2096VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2097{
2098 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
2099}
2100
2101#ifndef IN_RING3
2102
2103/**
2104 * Lazily sync in the FPU/XMM state
2105 *
2106 * @returns VBox status code.
2107 * @param pVCpu VMCPU handle
2108 */
2109VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2110{
2111 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2112}
2113
2114#endif /* !IN_RING3 */
2115
2116/**
2117 * Checks if we activated the FPU/XMM state of the guest OS
2118 * @returns true if we did.
2119 * @returns false if not.
2120 * @param pVCpu The VMCPU handle.
2121 */
2122VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2123{
2124 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2125}
2126
2127
2128/**
2129 * Deactivate the FPU/XMM state of the guest OS
2130 * @param pVCpu The VMCPU handle.
2131 */
2132VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2133{
2134 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2135}
2136
2137
2138/**
2139 * Checks if the guest debug state is active
2140 *
2141 * @returns boolean
2142 * @param pVCpu The VMCPU handle.
2143 */
2144VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2145{
2146 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2147}
2148
2149/**
2150 * Checks if the hyper debug state is active
2151 *
2152 * @returns boolean
2153 * @param pVCpu The VMCPU handle.
2154 */
2155VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2156{
2157 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
2158}
2159
2160
2161/**
2162 * Mark the guest's debug state as inactive
2163 *
2164 * @param pVCpu The VMCPU handle.
2166 */
2167VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2168{
2169 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2170}
2171
2172
2173/**
2174 * Mark the hypervisor's debug state as inactive
2175 *
2176 * @param pVCpu The VMCPU handle.
2178 */
2179VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
2180{
2181 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2182}
2183
2184/**
2185 * Checks if the hidden selector registers are valid
2186 * @returns true if they are.
2187 * @returns false if not.
2188 * @param pVM The VM handle.
2189 */
2190VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
2191{
2192 return HWACCMIsEnabled(pVM);
2193}
2194
2195
2196
2197/**
2198 * Get the current privilege level of the guest.
2199 *
2200 * @returns cpl
2201 * @param pVCpu The VMCPU handle.
2202 * @param pCtxCore The context core (trap register frame).
2203 */
2204VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2205{
2206 uint32_t cpl;
2207
2208 if (CPUMAreHiddenSelRegsValid(pVCpu->CTX_SUFF(pVM)))
2209 {
2210 /*
2211 * The hidden CS.DPL register is always equal to the CPL, it is
2212 * not affected by loading a conforming coding segment.
2213 *
2214 * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
2215 * at SS. (ACP2 regression during install after a far call to ring 2)
2216 */
2217 if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2218 cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
2219 else
2220 cpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2221 }
2222 else if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2223 {
2224 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
2225 {
2226 /*
2227 * The SS RPL is always equal to the CPL, while the CS RPL
2228 * isn't necessarily equal if the segment is conforming.
2229 * See section 4.11.1 in the AMD manual.
2230 */
2231 cpl = (pCtxCore->ss & X86_SEL_RPL);
2232#ifndef IN_RING0
2233 if (cpl == 1)
2234 cpl = 0;
2235#endif
2236 }
2237 else
2238 cpl = 3;
2239 }
2240 else
2241 cpl = 0; /* real mode; cpl is zero */
2242
2243 return cpl;
2244}
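/* Summary of the cases above: real mode => CPL 0; V86 mode => CPL 3; with
 * valid hidden selector registers (HWACCM) and PE set => the hidden SS DPL;
 * otherwise the RPL of SS, with raw-mode ring-1 reported back as ring 0
 * outside ring-0 context.
 */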
2245
2246
2247/**
2248 * Gets the current guest CPU mode.
2249 *
2250 * If paging mode is what you need, check out PGMGetGuestMode().
2251 *
2252 * @returns The CPU mode.
2253 * @param pVCpu The VMCPU handle.
2254 */
2255VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2256{
2257 CPUMMODE enmMode;
2258 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2259 enmMode = CPUMMODE_REAL;
2260 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2261 enmMode = CPUMMODE_PROTECTED;
2262 else
2263 enmMode = CPUMMODE_LONG;
2264
2265 return enmMode;
2266}