VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 18976

Last change on this file since 18976 was 18927, checked in by vboxsync, 16 years ago

Big step to separate VMM data structures for guest SMP. (pgm, em)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 54.2 KB
1/* $Id: CPUMAllRegs.cpp 18927 2009-04-16 11:41:38Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_CPUM
27#include <VBox/cpum.h>
28#include <VBox/patm.h>
29#include <VBox/dbgf.h>
30#include <VBox/mm.h>
31#include "CPUMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/err.h>
34#include <VBox/dis.h>
35#include <VBox/log.h>
36#include <VBox/hwaccm.h>
37#include <VBox/tm.h>
38#include <iprt/assert.h>
39#include <iprt/asm.h>
40#ifdef IN_RING3
41#include <iprt/thread.h>
42#endif
43
44/** Disable stack frame pointer generation here. */
45#if defined(_MSC_VER) && !defined(DEBUG)
46# pragma optimize("y", off)
47#endif
48
49
50/**
51 * Sets or resets an alternative hypervisor context core.
52 *
53 * This is called when we get a hypervisor trap to switch the context
54 * core with the trap frame on the stack. It is called again to reset
55 * back to the default context core when resuming hypervisor execution.
56 *
57 * @param pVCpu The VMCPU handle.
58 * @param pCtxCore Pointer to the alternative context core or NULL
59 * to go back to the default context core.
60 */
61VMMDECL(void) CPUMHyperSetCtxCore(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
62{
63 PVM pVM = pVCpu->CTX_SUFF(pVM);
64
65 LogFlow(("CPUMHyperSetCtxCore: %p/%p/%p -> %p\n", pVCpu->cpum.s.pHyperCoreR3, pVCpu->cpum.s.pHyperCoreR0, pVCpu->cpum.s.pHyperCoreRC, pCtxCore));
66 if (!pCtxCore)
67 {
68 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
69 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
70 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
71 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))VM_RC_ADDR(pVM, pCtxCore);
72 }
73 else
74 {
75 pVCpu->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
76 pVCpu->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
77 pVCpu->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
78 }
79}
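
/*
 * Illustrative sketch, not part of the original file: a hypothetical trap
 * handler that temporarily points the hypervisor context core at a trap
 * frame and restores the default core afterwards, as documented above.
 * 'pTrapFrame' is an assumed CPUMCTXCORE view of the trap frame on the stack.
 */
static void exampleHandleHyperTrap(PVMCPU pVCpu, PCPUMCTXCORE pTrapFrame)
{
    CPUMHyperSetCtxCore(pVCpu, pTrapFrame); /* switch to the trap frame */
    /* ... inspect or adjust the interrupted hypervisor state here ... */
    CPUMHyperSetCtxCore(pVCpu, NULL);       /* back to the default core */
}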
80
81
82/**
83 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
84 * This is only for reading in order to save a few calls.
85 *
86 * @param pVCpu The VMCPU handle.
87 */
88VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
89{
90 return pVCpu->cpum.s.CTX_SUFF(pHyperCore);
91}
92
93
94/**
95 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
96 *
97 * @returns VBox status code.
98 * @param pVCpu The VMCPU handle.
99 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
100 *
101 * @deprecated This will *not* give (and has never given) the right picture
102 * of the hypervisor register state. With CPUMHyperSetCtxCore()
103 * this is getting much worse. So, use the individual functions
104 * for getting and especially setting the hypervisor registers.
105 */
106VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx)
107{
108 *ppCtx = &pVCpu->cpum.s.Hyper;
109 return VINF_SUCCESS;
110}
111
112
113VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
114{
115 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
116 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
117 pVCpu->cpum.s.Hyper.gdtrPadding = 0;
118}
119
120
121VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
122{
123 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
124 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
125 pVCpu->cpum.s.Hyper.idtrPadding = 0;
126}
127
128
129VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
130{
131 pVCpu->cpum.s.Hyper.cr3 = cr3;
132
133#ifdef IN_RC
134 /* Update the current CR3. */
135 ASMSetCR3(cr3);
136#endif
137}
138
139VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
140{
141 return pVCpu->cpum.s.Hyper.cr3;
142}
143
144
145VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
146{
147 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs = SelCS;
148}
149
150
151VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
152{
153 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds = SelDS;
154}
155
156
157VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
158{
159 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es = SelES;
160}
161
162
163VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
164{
165 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs = SelFS;
166}
167
168
169VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
170{
171 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs = SelGS;
172}
173
174
175VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
176{
177 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss = SelSS;
178}
179
180
181VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
182{
183 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp = u32ESP;
184}
185
186
187VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
188{
189 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32 = Efl;
190 return VINF_SUCCESS;
191}
192
193
194VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
195{
196 pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip = u32EIP;
197}
198
199
200VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
201{
202 pVCpu->cpum.s.Hyper.tr = SelTR;
203}
204
205
206VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
207{
208 pVCpu->cpum.s.Hyper.ldtr = SelLDTR;
209}
210
211
212VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
213{
214 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
215 /** @todo in GC we must load it! */
216}
217
218
219VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
220{
221 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
222 /** @todo in GC we must load it! */
223}
224
225
226VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
227{
228 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
229 /** @todo in GC we must load it! */
230}
231
232
233VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
234{
235 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
236 /** @todo in GC we must load it! */
237}
238
239
240VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
241{
242 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
243 /** @todo in GC we must load it! */
244}
245
246
247VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
248{
249 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
250 /** @todo in GC we must load it! */
251}
252
253
254VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
255{
256 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->cs;
257}
258
259
260VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
261{
262 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ds;
263}
264
265
266VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
267{
268 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->es;
269}
270
271
272VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
273{
274 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->fs;
275}
276
277
278VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
279{
280 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->gs;
281}
282
283
284VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
285{
286 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ss;
287}
288
289
290VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
291{
292 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eax;
293}
294
295
296VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
297{
298 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebx;
299}
300
301
302VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
303{
304 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ecx;
305}
306
307
308VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
309{
310 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edx;
311}
312
313
314VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
315{
316 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esi;
317}
318
319
320VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
321{
322 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->edi;
323}
324
325
326VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
327{
328 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->ebp;
329}
330
331
332VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
333{
334 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->esp;
335}
336
337
338VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
339{
340 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32;
341}
342
343
344VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
345{
346 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->eip;
347}
348
349
350VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
351{
352 return pVCpu->cpum.s.CTX_SUFF(pHyperCore)->rip;
353}
354
355
356VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
357{
358 if (pcbLimit)
359 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
360 return pVCpu->cpum.s.Hyper.idtr.pIdt;
361}
362
363
364VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
365{
366 if (pcbLimit)
367 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
368 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
369}
370
371
372VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
373{
374 return pVCpu->cpum.s.Hyper.ldtr;
375}
376
377
378VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
379{
380 return pVCpu->cpum.s.Hyper.dr[0];
381}
382
383
384VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
385{
386 return pVCpu->cpum.s.Hyper.dr[1];
387}
388
389
390VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
391{
392 return pVCpu->cpum.s.Hyper.dr[2];
393}
394
395
396VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
397{
398 return pVCpu->cpum.s.Hyper.dr[3];
399}
400
401
402VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
403{
404 return pVCpu->cpum.s.Hyper.dr[6];
405}
406
407
408VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
409{
410 return pVCpu->cpum.s.Hyper.dr[7];
411}
412
413
414/**
415 * Gets the pointer to the internal CPUMCTXCORE structure.
416 * This is only for reading in order to save a few calls.
417 *
418 * @param pVCpu Handle to the virtual cpu.
419 */
420VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
421{
422 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
423}
424
425
426/**
427 * Sets the guest context core registers.
428 *
429 * @param pVCpu Handle to the virtual cpu.
430 * @param pCtxCore The new context core values.
431 */
432VMMDECL(void) CPUMSetGuestCtxCore(PVMCPU pVCpu, PCCPUMCTXCORE pCtxCore)
433{
434 /** @todo #1410 requires selectors to be checked. (huh? 1410?) */
435
436 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
437 *pCtxCoreDst = *pCtxCore;
438
439 /* Mask away invalid parts of the cpu context. */
440 if (!CPUMIsGuestInLongMode(pVCpu))
441 {
442 uint64_t u64Mask = UINT64_C(0xffffffff);
443
444 pCtxCoreDst->rip &= u64Mask;
445 pCtxCoreDst->rax &= u64Mask;
446 pCtxCoreDst->rbx &= u64Mask;
447 pCtxCoreDst->rcx &= u64Mask;
448 pCtxCoreDst->rdx &= u64Mask;
449 pCtxCoreDst->rsi &= u64Mask;
450 pCtxCoreDst->rdi &= u64Mask;
451 pCtxCoreDst->rbp &= u64Mask;
452 pCtxCoreDst->rsp &= u64Mask;
453 pCtxCoreDst->rflags.u &= u64Mask;
454
455 pCtxCoreDst->r8 = 0;
456 pCtxCoreDst->r9 = 0;
457 pCtxCoreDst->r10 = 0;
458 pCtxCoreDst->r11 = 0;
459 pCtxCoreDst->r12 = 0;
460 pCtxCoreDst->r13 = 0;
461 pCtxCoreDst->r14 = 0;
462 pCtxCoreDst->r15 = 0;
463 }
464}
465
466
467/**
468 * Queries the pointer to the internal CPUMCTX structure
469 *
470 * @returns The CPUMCTX pointer.
471 * @param pVCpu Handle to the virtual cpu.
472 */
473VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
474{
475 return &pVCpu->cpum.s.Guest;
476}
477
478VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
479{
480 pVCpu->cpum.s.Guest.gdtr.cbGdt = limit;
481 pVCpu->cpum.s.Guest.gdtr.pGdt = addr;
482 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
483 return VINF_SUCCESS;
484}
485
486VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
487{
488 pVCpu->cpum.s.Guest.idtr.cbIdt = limit;
489 pVCpu->cpum.s.Guest.idtr.pIdt = addr;
490 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
491 return VINF_SUCCESS;
492}
493
494VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
495{
496 AssertMsgFailed(("Need to load the hidden bits too!\n"));
497
498 pVCpu->cpum.s.Guest.tr = tr;
499 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
500 return VINF_SUCCESS;
501}
502
503VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
504{
505 pVCpu->cpum.s.Guest.ldtr = ldtr;
506 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
507 return VINF_SUCCESS;
508}
509
510
511/**
512 * Set the guest CR0.
513 *
514 * When called in GC, the hyper CR0 may be updated if that is
515 * required. The caller only has to take special action if AM,
516 * WP, PG or PE changes.
517 *
518 * @returns VINF_SUCCESS (consider it void).
519 * @param pVCpu Handle to the virtual cpu.
520 * @param cr0 The new CR0 value.
521 */
522VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
523{
524#ifdef IN_RC
525 /*
526 * Check if we need to change hypervisor CR0 because
527 * of math stuff.
528 */
529 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
530 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
531 {
532 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
533 {
534 /*
535 * We haven't saved the host FPU state yet, so TS and MP are both set
536 * and EM should be reflecting the guest EM (it always does this).
537 */
538 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
539 {
540 uint32_t HyperCR0 = ASMGetCR0();
541 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
542 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
543 HyperCR0 &= ~X86_CR0_EM;
544 HyperCR0 |= cr0 & X86_CR0_EM;
545 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
546 ASMSetCR0(HyperCR0);
547 }
548# ifdef VBOX_STRICT
549 else
550 {
551 uint32_t HyperCR0 = ASMGetCR0();
552 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
553 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
554 }
555# endif
556 }
557 else
558 {
559 /*
560 * Already saved the state, so we're just mirroring
561 * the guest flags.
562 */
563 uint32_t HyperCR0 = ASMGetCR0();
564 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
565 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
566 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
567 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
568 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
569 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
570 ASMSetCR0(HyperCR0);
571 }
572 }
573#endif /* IN_RC */
574
575 /*
576 * Check for changes causing TLB flushes (for REM).
577 * The caller is responsible for calling PGM when appropriate.
578 */
579 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
580 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
581 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
582 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
583
584 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
585 return VINF_SUCCESS;
586}
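
/*
 * Illustrative sketch, not part of the original file: a caller updating
 * guest CR0 and handling the AM/WP/PG/PE changes it is responsible for,
 * per the note above. The PGM notification is indicated by a comment only.
 */
static void exampleSetGuestCR0(PVMCPU pVCpu, uint64_t cr0New)
{
    uint64_t const cr0Old = CPUMGetGuestCR0(pVCpu);
    CPUMSetGuestCR0(pVCpu, cr0New);
    if ((cr0Old ^ cr0New) & (X86_CR0_AM | X86_CR0_WP | X86_CR0_PG | X86_CR0_PE))
    {
        /* Paging/alignment semantics changed: notify PGM here. */
    }
}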
587
588
589VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
590{
591 pVCpu->cpum.s.Guest.cr2 = cr2;
592 return VINF_SUCCESS;
593}
594
595
596VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
597{
598 pVCpu->cpum.s.Guest.cr3 = cr3;
599 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
600 return VINF_SUCCESS;
601}
602
603
604VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
605{
606 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
607 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
608 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
609 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
610 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
611 cr4 &= ~X86_CR4_OSFSXR;
612 pVCpu->cpum.s.Guest.cr4 = cr4;
613 return VINF_SUCCESS;
614}
615
616
617VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
618{
619 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
620 return VINF_SUCCESS;
621}
622
623
624VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
625{
626 pVCpu->cpum.s.Guest.eip = eip;
627 return VINF_SUCCESS;
628}
629
630
631VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
632{
633 pVCpu->cpum.s.Guest.eax = eax;
634 return VINF_SUCCESS;
635}
636
637
638VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
639{
640 pVCpu->cpum.s.Guest.ebx = ebx;
641 return VINF_SUCCESS;
642}
643
644
645VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
646{
647 pVCpu->cpum.s.Guest.ecx = ecx;
648 return VINF_SUCCESS;
649}
650
651
652VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
653{
654 pVCpu->cpum.s.Guest.edx = edx;
655 return VINF_SUCCESS;
656}
657
658
659VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
660{
661 pVCpu->cpum.s.Guest.esp = esp;
662 return VINF_SUCCESS;
663}
664
665
666VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
667{
668 pVCpu->cpum.s.Guest.ebp = ebp;
669 return VINF_SUCCESS;
670}
671
672
673VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
674{
675 pVCpu->cpum.s.Guest.esi = esi;
676 return VINF_SUCCESS;
677}
678
679
680VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
681{
682 pVCpu->cpum.s.Guest.edi = edi;
683 return VINF_SUCCESS;
684}
685
686
687VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
688{
689 pVCpu->cpum.s.Guest.ss = ss;
690 return VINF_SUCCESS;
691}
692
693
694VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
695{
696 pVCpu->cpum.s.Guest.cs = cs;
697 return VINF_SUCCESS;
698}
699
700
701VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
702{
703 pVCpu->cpum.s.Guest.ds = ds;
704 return VINF_SUCCESS;
705}
706
707
708VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
709{
710 pVCpu->cpum.s.Guest.es = es;
711 return VINF_SUCCESS;
712}
713
714
715VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
716{
717 pVCpu->cpum.s.Guest.fs = fs;
718 return VINF_SUCCESS;
719}
720
721
722VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
723{
724 pVCpu->cpum.s.Guest.gs = gs;
725 return VINF_SUCCESS;
726}
727
728
729VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
730{
731 pVCpu->cpum.s.Guest.msrEFER = val;
732}
733
734
735VMMDECL(uint64_t) CPUMGetGuestMsr(PVMCPU pVCpu, unsigned idMsr)
736{
737 uint64_t u64 = 0;
738
739 switch (idMsr)
740 {
741 case MSR_IA32_TSC:
742 u64 = TMCpuTickGet(pVCpu->CTX_SUFF(pVM));
743 break;
744
745 case MSR_IA32_CR_PAT:
746 u64 = pVCpu->cpum.s.Guest.msrPAT;
747 break;
748
749 case MSR_IA32_SYSENTER_CS:
750 u64 = pVCpu->cpum.s.Guest.SysEnter.cs;
751 break;
752
753 case MSR_IA32_SYSENTER_EIP:
754 u64 = pVCpu->cpum.s.Guest.SysEnter.eip;
755 break;
756
757 case MSR_IA32_SYSENTER_ESP:
758 u64 = pVCpu->cpum.s.Guest.SysEnter.esp;
759 break;
760
761 case MSR_K6_EFER:
762 u64 = pVCpu->cpum.s.Guest.msrEFER;
763 break;
764
765 case MSR_K8_SF_MASK:
766 u64 = pVCpu->cpum.s.Guest.msrSFMASK;
767 break;
768
769 case MSR_K6_STAR:
770 u64 = pVCpu->cpum.s.Guest.msrSTAR;
771 break;
772
773 case MSR_K8_LSTAR:
774 u64 = pVCpu->cpum.s.Guest.msrLSTAR;
775 break;
776
777 case MSR_K8_CSTAR:
778 u64 = pVCpu->cpum.s.Guest.msrCSTAR;
779 break;
780
781 case MSR_K8_KERNEL_GS_BASE:
782 u64 = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
783 break;
784
785 case MSR_K8_TSC_AUX:
786 u64 = pVCpu->cpum.s.GuestMsr.msr.tscAux;
787 break;
788
789 /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
790 default:
791 AssertFailed();
792 break;
793 }
794 return u64;
795}
796
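/*
 * Illustrative sketch, not part of the original file: reading MSRs through
 * CPUMGetGuestMsr() above. Only MSRs with cases above are legal here;
 * anything else trips the AssertFailed() in the default branch.
 */
static void exampleReadGuestMsrs(PVMCPU pVCpu)
{
    uint64_t const uEfer = CPUMGetGuestMsr(pVCpu, MSR_K6_EFER);  /* EFER shadow */
    uint64_t const uTsc  = CPUMGetGuestMsr(pVCpu, MSR_IA32_TSC); /* virtual TSC */
    Log2(("exampleReadGuestMsrs: EFER=%RX64 TSC=%RX64\n", uEfer, uTsc));
}
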
797VMMDECL(void) CPUMSetGuestMsr(PVMCPU pVCpu, unsigned idMsr, uint64_t valMsr)
798{
799 /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
800 switch (idMsr)
801 {
802 case MSR_K8_TSC_AUX:
803 pVCpu->cpum.s.GuestMsr.msr.tscAux = valMsr;
804 break;
805
806 default:
807 AssertFailed();
808 break;
809 }
810}
811
812VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
813{
814 if (pcbLimit)
815 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
816 return pVCpu->cpum.s.Guest.idtr.pIdt;
817}
818
819
820VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
821{
822 if (pHidden)
823 *pHidden = pVCpu->cpum.s.Guest.trHid;
824 return pVCpu->cpum.s.Guest.tr;
825}
826
827
828VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
829{
830 return pVCpu->cpum.s.Guest.cs;
831}
832
833
834VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
835{
836 return pVCpu->cpum.s.Guest.ds;
837}
838
839
840VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
841{
842 return pVCpu->cpum.s.Guest.es;
843}
844
845
846VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
847{
848 return pVCpu->cpum.s.Guest.fs;
849}
850
851
852VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
853{
854 return pVCpu->cpum.s.Guest.gs;
855}
856
857
858VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
859{
860 return pVCpu->cpum.s.Guest.ss;
861}
862
863
864VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
865{
866 return pVCpu->cpum.s.Guest.ldtr;
867}
868
869
870VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
871{
872 return pVCpu->cpum.s.Guest.cr0;
873}
874
875
876VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
877{
878 return pVCpu->cpum.s.Guest.cr2;
879}
880
881
882VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
883{
884 return pVCpu->cpum.s.Guest.cr3;
885}
886
887
888VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
889{
890 return pVCpu->cpum.s.Guest.cr4;
891}
892
893
894VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
895{
896 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
897}
898
899
900VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
901{
902 return pVCpu->cpum.s.Guest.eip;
903}
904
905
906VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
907{
908 return pVCpu->cpum.s.Guest.rip;
909}
910
911
912VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
913{
914 return pVCpu->cpum.s.Guest.eax;
915}
916
917
918VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
919{
920 return pVCpu->cpum.s.Guest.ebx;
921}
922
923
924VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
925{
926 return pVCpu->cpum.s.Guest.ecx;
927}
928
929
930VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
931{
932 return pVCpu->cpum.s.Guest.edx;
933}
934
935
936VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
937{
938 return pVCpu->cpum.s.Guest.esi;
939}
940
941
942VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
943{
944 return pVCpu->cpum.s.Guest.edi;
945}
946
947
948VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
949{
950 return pVCpu->cpum.s.Guest.esp;
951}
952
953
954VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
955{
956 return pVCpu->cpum.s.Guest.ebp;
957}
958
959
960VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
961{
962 return pVCpu->cpum.s.Guest.eflags.u32;
963}
964
965
966/** @todo crx should be an array. */
967VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
968{
969 switch (iReg)
970 {
971 case USE_REG_CR0:
972 *pValue = pVCpu->cpum.s.Guest.cr0;
973 break;
974 case USE_REG_CR2:
975 *pValue = pVCpu->cpum.s.Guest.cr2;
976 break;
977 case USE_REG_CR3:
978 *pValue = pVCpu->cpum.s.Guest.cr3;
979 break;
980 case USE_REG_CR4:
981 *pValue = pVCpu->cpum.s.Guest.cr4;
982 break;
983 default:
984 return VERR_INVALID_PARAMETER;
985 }
986 return VINF_SUCCESS;
987}
988
989
990VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
991{
992 return pVCpu->cpum.s.Guest.dr[0];
993}
994
995
996VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
997{
998 return pVCpu->cpum.s.Guest.dr[1];
999}
1000
1001
1002VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1003{
1004 return pVCpu->cpum.s.Guest.dr[2];
1005}
1006
1007
1008VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1009{
1010 return pVCpu->cpum.s.Guest.dr[3];
1011}
1012
1013
1014VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1015{
1016 return pVCpu->cpum.s.Guest.dr[6];
1017}
1018
1019
1020VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1021{
1022 return pVCpu->cpum.s.Guest.dr[7];
1023}
1024
1025
1026VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1027{
1028 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1029 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1030 if (iReg == 4 || iReg == 5)
1031 iReg += 2;
1032 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1033 return VINF_SUCCESS;
1034}
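
/*
 * Illustrative sketch, not part of the original file: the DR4/DR5 aliasing
 * above means asking for register 4 yields DR6 and register 5 yields DR7.
 */
static void exampleDrAliases(PVMCPU pVCpu)
{
    uint64_t uValue;
    int rc = CPUMGetGuestDRx(pVCpu, 4, &uValue); /* alias of DR6 */
    AssertRC(rc);
    Assert(uValue == CPUMGetGuestDR6(pVCpu));
    rc = CPUMGetGuestDRx(pVCpu, 5, &uValue);     /* alias of DR7 */
    AssertRC(rc);
    Assert(uValue == CPUMGetGuestDR7(pVCpu));
}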
1035
1036
1037VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1038{
1039 return pVCpu->cpum.s.Guest.msrEFER;
1040}
1041
1042
1043/**
1044 * Gets a CpuId leaf.
1045 *
1046 * @param pVM The VM handle.
1047 * @param iLeaf The CPUID leaf to get.
1048 * @param pEax Where to store the EAX value.
1049 * @param pEbx Where to store the EBX value.
1050 * @param pEcx Where to store the ECX value.
1051 * @param pEdx Where to store the EDX value.
1052 */
1053VMMDECL(void) CPUMGetGuestCpuId(PVM pVM, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1054{
1055 PCCPUMCPUID pCpuId;
1056 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1057 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1058 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1059 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1060 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1061 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1062 else
1063 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1064
1065 *pEax = pCpuId->eax;
1066 *pEbx = pCpuId->ebx;
1067 *pEcx = pCpuId->ecx;
1068 *pEdx = pCpuId->edx;
1069 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1070}
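
/*
 * Illustrative sketch, not part of the original file: querying standard
 * leaf 1 via CPUMGetGuestCpuId() above and testing a feature bit.
 */
static bool exampleGuestHasPae(PVM pVM)
{
    uint32_t uEax, uEbx, uEcx, uEdx;
    CPUMGetGuestCpuId(pVM, 1, &uEax, &uEbx, &uEcx, &uEdx);
    return RT_BOOL(uEdx & X86_CPUID_FEATURE_EDX_PAE);
}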
1071
1072
1073/**
1074 * Gets a pointer to the array of standard CPUID leafs.
1075 *
1076 * CPUMGetGuestCpuIdStdMax() gives the size of the array.
1077 *
1078 * @returns Pointer to the standard CPUID leafs (read-only).
1079 * @param pVM The VM handle.
1080 * @remark Intended for PATM.
1081 */
1082VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdStdRCPtr(PVM pVM)
1083{
1084 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
1085}
1086
1087
1088/**
1089 * Gets a pointer to the array of extended CPUID leafs.
1090 *
1091 * CPUMGetGuestCpuIdExtMax() gives the size of the array.
1092 *
1093 * @returns Pointer to the extended CPUID leafs (read-only).
1094 * @param pVM The VM handle.
1095 * @remark Intended for PATM.
1096 */
1097VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdExtRCPtr(PVM pVM)
1098{
1099 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
1100}
1101
1102
1103/**
1104 * Gets a pointer to the array of centaur CPUID leafs.
1105 *
1106 * CPUMGetGuestCpuIdCentaurMax() gives the size of the array.
1107 *
1108 * @returns Pointer to the centaur CPUID leafs (read-only).
1109 * @param pVM The VM handle.
1110 * @remark Intended for PATM.
1111 */
1112VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdCentaurRCPtr(PVM pVM)
1113{
1114 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
1115}
1116
1117
1118/**
1119 * Gets a pointer to the default CPUID leaf.
1120 *
1121 * @returns Pointer to the default CPUID leaf (read-only).
1122 * @param pVM The VM handle.
1123 * @remark Intended for PATM.
1124 */
1125VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdDefRCPtr(PVM pVM)
1126{
1127 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
1128}
1129
1130
1131/**
1132 * Gets the number of standard CPUID leafs.
1133 *
1134 * @returns Number of leafs.
1135 * @param pVM The VM handle.
1136 * @remark Intended for PATM.
1137 */
1138VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1139{
1140 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1141}
1142
1143
1144/**
1145 * Gets the number of extended CPUID leafs.
1146 *
1147 * @returns Number of leafs.
1148 * @param pVM The VM handle.
1149 * @remark Intended for PATM.
1150 */
1151VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1152{
1153 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1154}
1155
1156
1157/**
1158 * Gets the number of centaur CPUID leafs.
1159 *
1160 * @returns Number of leafs.
1161 * @param pVM The VM handle.
1162 * @remark Intended for PATM.
1163 */
1164VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1165{
1166 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1167}
1168
1169
1170/**
1171 * Sets a CPUID feature bit.
1172 *
1173 * @param pVM The VM Handle.
1174 * @param enmFeature The feature to set.
1175 */
1176VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1177{
1178 switch (enmFeature)
1179 {
1180 /*
1181 * Set the APIC bit in both feature masks.
1182 */
1183 case CPUMCPUIDFEATURE_APIC:
1184 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1185 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1186 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1187 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1188 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1189 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1190 break;
1191
1192 /*
1193 * Set the x2APIC bit in the standard feature mask.
1194 */
1195 case CPUMCPUIDFEATURE_X2APIC:
1196 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1197 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1198 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1199 break;
1200
1201 /*
1202 * Set the sysenter/sysexit bit in the standard feature mask.
1203 * Assumes the caller knows what it's doing! (host must support these)
1204 */
1205 case CPUMCPUIDFEATURE_SEP:
1206 {
1207 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1208 {
1209 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1210 return;
1211 }
1212
1213 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1214 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1215 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1216 break;
1217 }
1218
1219 /*
1220 * Set the syscall/sysret bit in the extended feature mask.
1221 * Assumes the caller knows what it's doing! (host must support these)
1222 */
1223 case CPUMCPUIDFEATURE_SYSCALL:
1224 {
1225 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1226 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1227 {
1228#if HC_ARCH_BITS == 32
1229 /* X86_CPUID_AMD_FEATURE_EDX_SEP does not seem to be set in 32-bit mode,
1230 * even when the CPU is capable of it in 64-bit mode.
1231 */
1232 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1233 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1234 || !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1235#endif
1236 {
1237 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1238 return;
1239 }
1240 }
1241 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1242 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1243 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1244 break;
1245 }
1246
1247 /*
1248 * Set the PAE bit in both feature masks.
1249 * Assumes the caller knows what it's doing! (host must support these)
1250 */
1251 case CPUMCPUIDFEATURE_PAE:
1252 {
1253 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1254 {
1255 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1256 return;
1257 }
1258
1259 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1260 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1261 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1262 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1263 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1264 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1265 break;
1266 }
1267
1268 /*
1269 * Set the LONG MODE bit in the extended feature mask.
1270 * Assumes the caller knows what it's doing! (host must support these)
1271 */
1272 case CPUMCPUIDFEATURE_LONG_MODE:
1273 {
1274 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1275 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1276 {
1277 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1278 return;
1279 }
1280
1281 /* Valid for both Intel and AMD. */
1282 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1283 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1284 break;
1285 }
1286
1287 /*
1288 * Set the NXE bit in the extended feature mask.
1289 * Assumes the caller knows what it's doing! (host must support these)
1290 */
1291 case CPUMCPUIDFEATURE_NXE:
1292 {
1293 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1294 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1295 {
1296 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1297 return;
1298 }
1299
1300 /* Valid for both Intel and AMD. */
1301 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1302 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1303 break;
1304 }
1305
1306 case CPUMCPUIDFEATURE_LAHF:
1307 {
1308 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1309 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1310 {
1311 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1312 return;
1313 }
1314
1315 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1316 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1317 break;
1318 }
1319
1320 case CPUMCPUIDFEATURE_PAT:
1321 {
1322 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1323 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1324 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1325 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1326 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1327 LogRel(("CPUMClearGuestCpuIdFeature: Enabled PAT\n"));
1328 break;
1329 }
1330
1331 case CPUMCPUIDFEATURE_RDTSCP:
1332 {
1333 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1334 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP))
1335 {
1336 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1337 return;
1338 }
1339
1340 /* Valid for AMD only (for now). */
1341 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
1342 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1343 break;
1344 }
1345
1346 default:
1347 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1348 break;
1349 }
1350 for (unsigned i = 0; i < pVM->cCPUs; i++)
1351 {
1352 PVMCPU pVCpu = &pVM->aCpus[i];
1353
1354 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1355 }
1356}
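
/*
 * Illustrative sketch, not part of the original file: a configuration path
 * exposing PAE and NX to the guest and double-checking the PAE bit with
 * CPUMGetGuestCpuIdFeature() (defined below, declared in VBox/cpum.h).
 */
static void exampleExposePaeNx(PVM pVM)
{
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
    Assert(CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE));
}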
1357
1358
1359/**
1360 * Queries a CPUID feature bit.
1361 *
1362 * @returns boolean for feature presence
1363 * @param pVM The VM Handle.
1364 * @param enmFeature The feature to query.
1365 */
1366VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1367{
1368 switch (enmFeature)
1369 {
1370 case CPUMCPUIDFEATURE_PAE:
1371 {
1372 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1373 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1374 break;
1375 }
1376
1377 case CPUMCPUIDFEATURE_RDTSCP:
1378 {
1379 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1380 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
1381 break;
1382 }
1383
1384 case CPUMCPUIDFEATURE_LONG_MODE:
1385 {
1386 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1387 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
1388 break;
1389 }
1390
1391 default:
1392 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1393 break;
1394 }
1395 return false;
1396}
1397
1398
1399/**
1400 * Clears a CPUID feature bit.
1401 *
1402 * @param pVM The VM Handle.
1403 * @param enmFeature The feature to clear.
1404 */
1405VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1406{
1407 switch (enmFeature)
1408 {
1409 /*
1410 * Set the APIC bit in both feature masks.
1411 */
1412 case CPUMCPUIDFEATURE_APIC:
1413 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1414 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1415 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1416 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1417 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1418 Log(("CPUMSetGuestCpuIdFeature: Disabled APIC\n"));
1419 break;
1420
1421 /*
1422 * Clear the x2APIC bit in the standard feature mask.
1423 */
1424 case CPUMCPUIDFEATURE_X2APIC:
1425 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1426 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1427 LogRel(("CPUMSetGuestCpuIdFeature: Disabled x2APIC\n"));
1428 break;
1429
1430 case CPUMCPUIDFEATURE_PAE:
1431 {
1432 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1433 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1434 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1435 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1436 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1437 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1438 break;
1439 }
1440
1441 case CPUMCPUIDFEATURE_PAT:
1442 {
1443 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1444 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1445 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1446 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1447 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1448 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1449 break;
1450 }
1451
1452 case CPUMCPUIDFEATURE_LONG_MODE:
1453 {
1454 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1455 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1456 break;
1457 }
1458
1459 case CPUMCPUIDFEATURE_LAHF:
1460 {
1461 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1462 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1463 break;
1464 }
1465
1466 default:
1467 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1468 break;
1469 }
1470 for (unsigned i = 0; i < pVM->cCPUs; i++)
1471 {
1472 PVMCPU pVCpu = &pVM->aCpus[i];
1473
1474 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1475 }
1476}
1477
1478
1479/**
1480 * Gets the CPU vendor.
1481 *
1482 * @returns CPU vendor
1483 * @param pVM The VM handle.
1484 */
1485VMMDECL(CPUMCPUVENDOR) CPUMGetCPUVendor(PVM pVM)
1486{
1487 return pVM->cpum.s.enmCPUVendor;
1488}
1489
1490
1491VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1492{
1493 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1494 return CPUMRecalcHyperDRx(pVCpu);
1495}
1496
1497
1498VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1499{
1500 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1501 return CPUMRecalcHyperDRx(pVCpu);
1502}
1503
1504
1505VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1506{
1507 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1508 return CPUMRecalcHyperDRx(pVCpu);
1509}
1510
1511
1512VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1513{
1514 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1515 return CPUMRecalcHyperDRx(pVCpu);
1516}
1517
1518
1519VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1520{
1521 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1522 return CPUMRecalcHyperDRx(pVCpu);
1523}
1524
1525
1526VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1527{
1528 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1529 return CPUMRecalcHyperDRx(pVCpu);
1530}
1531
1532
1533VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1534{
1535 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1536 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1537 if (iReg == 4 || iReg == 5)
1538 iReg += 2;
1539 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1540 return CPUMRecalcHyperDRx(pVCpu);
1541}
1542
1543
1544/**
1545 * Recalculates the hypervisor DRx register values based on
1546 * current guest registers and DBGF breakpoints.
1547 *
1548 * This is called whenever a guest DRx register is modified and when DBGF
1549 * sets a hardware breakpoint. In guest context this function will reload
1550 * any (hyper) DRx registers which come out with a different value.
1551 *
1552 * @returns VINF_SUCCESS.
1553 * @param pVCpu The VMCPU handle.
1554 */
1555VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
1556{
1557 PVM pVM = pVCpu->CTX_SUFF(pVM);
1558
1559 /*
1560 * Compare the DR7s first.
1561 *
1562 * We only care about the enabled flags. The GE and LE flags are always
1563 * set and we don't care if the guest doesn't set them. GD is virtualized
1564 * when we dispatch #DB; we never enable it.
1565 */
1566 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1567#ifdef CPUM_VIRTUALIZE_DRX
1568 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1569#else
1570 const RTGCUINTREG uGstDr7 = 0;
1571#endif
1572 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1573 {
1574 /*
1575 * Ok, something is enabled. Recalc each of the breakpoints.
1576 * Straightforward code, not optimized/minimized in any way.
1577 */
1578 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1579
1580 /* bp 0 */
1581 RTGCUINTREG uNewDr0;
1582 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1583 {
1584 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1585 uNewDr0 = DBGFBpGetDR0(pVM);
1586 }
1587 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1588 {
1589 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1590 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1591 }
1592 else
1593 uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
1594
1595 /* bp 1 */
1596 RTGCUINTREG uNewDr1;
1597 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1598 {
1599 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1600 uNewDr1 = DBGFBpGetDR1(pVM);
1601 }
1602 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1603 {
1604 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1605 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1606 }
1607 else
1608 uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
1609
1610 /* bp 2 */
1611 RTGCUINTREG uNewDr2;
1612 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1613 {
1614 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1615 uNewDr2 = DBGFBpGetDR2(pVM);
1616 }
1617 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1618 {
1619 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1620 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1621 }
1622 else
1623 uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
1624
1625 /* bp 3 */
1626 RTGCUINTREG uNewDr3;
1627 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1628 {
1629 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1630 uNewDr3 = DBGFBpGetDR3(pVM);
1631 }
1632 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1633 {
1634 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1635 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1636 }
1637 else
1638 uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
1639
1640 /*
1641 * Apply the updates.
1642 */
1643#ifdef IN_RC
1644 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
1645 {
1646 /** @todo save host DBx registers. */
1647 }
1648#endif
1649 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
1650 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1651 CPUMSetHyperDR3(pVCpu, uNewDr3);
1652 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1653 CPUMSetHyperDR2(pVCpu, uNewDr2);
1654 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1655 CPUMSetHyperDR1(pVCpu, uNewDr1);
1656 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1657 CPUMSetHyperDR0(pVCpu, uNewDr0);
1658 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1659 CPUMSetHyperDR7(pVCpu, uNewDr7);
1660 }
1661 else
1662 {
1663#ifdef IN_RC
1664 if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
1665 {
1666 /** @todo restore host DBx registers. */
1667 }
1668#endif
1669 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1670 }
1671 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1672 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1673 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1674 pVCpu->cpum.s.Hyper.dr[7]));
1675
1676 return VINF_SUCCESS;
1677}
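
/*
 * Illustrative sketch, not part of the original file: arming guest hardware
 * breakpoint 0. Each setter funnels through CPUMRecalcHyperDRx() above so
 * the hyper DRx values stay coherent. DR7.L0 alone encodes a local execute
 * breakpoint (RW and LEN fields zero).
 */
static void exampleArmGuestBp0(PVMCPU pVCpu, uint64_t GCPtrBp)
{
    CPUMSetGuestDR0(pVCpu, GCPtrBp);    /* linear breakpoint address */
    CPUMSetGuestDR7(pVCpu, X86_DR7_L0); /* enable local execute bp 0 */
}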
1678
1679#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
1680
1681/**
1682 * Transforms the guest CPU state to raw-ring mode.
1683 *
1684 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
1685 *
1686 * @returns VBox status. (recompiler failure)
1687 * @param pVCpu The VMCPU handle.
1688 * @param pCtxCore The context core (for trap usage).
1689 * @see @ref pg_raw
1690 */
1691VMMDECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
1692{
1693 PVM pVM = pVCpu->CTX_SUFF(pVM);
1694
1695 Assert(!pVM->cpum.s.fRawEntered);
1696 if (!pCtxCore)
1697 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
1698
1699 /*
1700 * Are we in Ring-0?
1701 */
1702 if ( pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
1703 && !pCtxCore->eflags.Bits.u1VM)
1704 {
1705 /*
1706 * Enter execution mode.
1707 */
1708 PATMRawEnter(pVM, pCtxCore);
1709
1710 /*
1711 * Set CPL to Ring-1.
1712 */
1713 pCtxCore->ss |= 1;
1714 if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
1715 pCtxCore->cs |= 1;
1716 }
1717 else
1718 {
1719 AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
1720 ("ring-1 code not supported\n"));
1721 /*
1722 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
1723 */
1724 PATMRawEnter(pVM, pCtxCore);
1725 }
1726
1727 /*
1728 * Assert sanity.
1729 */
1730 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
1731 AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
1732 || pCtxCore->eflags.Bits.u1VM,
1733 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1734 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
1735 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
1736
1737 pVM->cpum.s.fRawEntered = true;
1738 return VINF_SUCCESS;
1739}
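
/*
 * Illustrative sketch, not part of the original file: the enter/leave
 * pairing around a (hypothetical) raw-mode execution step. CPUMRawLeave()
 * is defined below; both are declared in VBox/cpum.h.
 */
static int exampleRawRoundTrip(PVMCPU pVCpu)
{
    int rc = CPUMRawEnter(pVCpu, NULL /* default guest context core */);
    AssertRCReturn(rc, rc);
    /* ... execute raw-mode guest code here ... */
    rc = VINF_SUCCESS;                  /* assumed execution result */
    return CPUMRawLeave(pVCpu, NULL, rc);
}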
1740
1741
1742/**
1743 * Transforms the guest CPU state from raw-ring mode to correct values.
1744 *
1745 * This function will change any selector registers with DPL=1 to DPL=0.
1746 *
1747 * @returns Adjusted rc.
1748 * @param pVCpu The VMCPU handle.
1749 * @param rc Raw mode return code
1750 * @param pCtxCore The context core (for trap usage).
1751 * @see @ref pg_raw
1752 */
1753VMMDECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
1754{
1755 PVM pVM = pVCpu->CTX_SUFF(pVM);
1756
1757 /*
1758 * Don't leave if we've already left (in GC).
1759 */
1760 Assert(pVM->cpum.s.fRawEntered);
1761 if (!pVM->cpum.s.fRawEntered)
1762 return rc;
1763 pVM->cpum.s.fRawEntered = false;
1764
1765 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1766 if (!pCtxCore)
1767 pCtxCore = CPUMCTX2CORE(pCtx);
1768 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
1769 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
1770 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1771
1772 /*
1773 * Are we executing in raw ring-1?
1774 */
1775 if ( (pCtxCore->ss & X86_SEL_RPL) == 1
1776 && !pCtxCore->eflags.Bits.u1VM)
1777 {
1778 /*
1779 * Leave execution mode.
1780 */
1781 PATMRawLeave(pVM, pCtxCore, rc);
1782 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
1783 /** @todo See what happens if we remove this. */
1784 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1785 pCtxCore->ds &= ~X86_SEL_RPL;
1786 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1787 pCtxCore->es &= ~X86_SEL_RPL;
1788 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1789 pCtxCore->fs &= ~X86_SEL_RPL;
1790 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1791 pCtxCore->gs &= ~X86_SEL_RPL;
1792
1793 /*
1794 * Ring-1 selector => Ring-0.
1795 */
1796 pCtxCore->ss &= ~X86_SEL_RPL;
1797 if ((pCtxCore->cs & X86_SEL_RPL) == 1)
1798 pCtxCore->cs &= ~X86_SEL_RPL;
1799 }
1800 else
1801 {
1802 /*
1803 * PATM is taking care of the IOPL and IF flags for us.
1804 */
1805 PATMRawLeave(pVM, pCtxCore, rc);
1806 if (!pCtxCore->eflags.Bits.u1VM)
1807 {
1808 /** @todo See what happens if we remove this. */
1809 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1810 pCtxCore->ds &= ~X86_SEL_RPL;
1811 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1812 pCtxCore->es &= ~X86_SEL_RPL;
1813 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1814 pCtxCore->fs &= ~X86_SEL_RPL;
1815 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1816 pCtxCore->gs &= ~X86_SEL_RPL;
1817 }
1818 }
1819
1820 return rc;
1821}
1822
1823/**
1824 * Updates the EFLAGS while we're in raw-mode.
1825 *
1826 * @param pVCpu The VMCPU handle.
1827 * @param pCtxCore The context core.
1828 * @param eflags The new EFLAGS value.
1829 */
1830VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t eflags)
1831{
1832 PVM pVM = pVCpu->CTX_SUFF(pVM);
1833
1834 if (!pVM->cpum.s.fRawEntered)
1835 {
1836 pCtxCore->eflags.u32 = eflags;
1837 return;
1838 }
1839 PATMRawSetEFlags(pVM, pCtxCore, eflags);
1840}
1841
1842#endif /* !IN_RING0 */
1843
1844/**
1845 * Gets the EFLAGS while we're in raw-mode.
1846 *
1847 * @returns The eflags.
1848 * @param pVCpu The VMCPU handle.
1849 * @param pCtxCore The context core.
1850 */
1851VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
1852{
1853#ifdef IN_RING0
1854 return pCtxCore->eflags.u32;
1855#else
1856 PVM pVM = pVCpu->CTX_SUFF(pVM);
1857
1858 if (!pVM->cpum.s.fRawEntered)
1859 return pCtxCore->eflags.u32;
1860 return PATMRawGetEFlags(pVM, pCtxCore);
1861#endif
1862}
1863
1864
1865/**
1866 * Gets and resets the changed flags (CPUM_CHANGED_*).
1867 * Only REM should call this function.
1868 *
1869 * @returns The changed flags.
1870 * @param pVCpu The VMCPU handle.
1871 */
1872VMMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVMCPU pVCpu)
1873{
1874 unsigned fFlags = pVCpu->cpum.s.fChanged;
1875 pVCpu->cpum.s.fChanged = 0;
1876 /** @todo change the switcher to use the fChanged flags. */
1877 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
1878 {
1879 fFlags |= CPUM_CHANGED_FPU_REM;
1880 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
1881 }
1882 return fFlags;
1883}
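
/*
 * Illustrative sketch, not part of the original file: how REM might consume
 * the changed flags, acting only on the bits it cares about.
 */
static void exampleSyncToRem(PVMCPU pVCpu)
{
    unsigned const fChanged = CPUMGetAndClearChangedFlagsREM(pVCpu);
    if (fChanged & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
    {
        /* Flush the recompiler TLB here. */
    }
    if (fChanged & CPUM_CHANGED_FPU_REM)
    {
        /* Resync the FPU state into REM here. */
    }
}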
1884
1885
1886/**
1887 * Sets the specified changed flags (CPUM_CHANGED_*).
1888 *
1889 * @param pVCpu The VMCPU handle.
1890 */
1891VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
1892{
1893 pVCpu->cpum.s.fChanged |= fChangedFlags;
1894}
1895
1896
1897/**
1898 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
1899 * @returns true if supported.
1900 * @returns false if not supported.
1901 * @param pVM The VM handle.
1902 */
1903VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
1904{
1905 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
1906}
1907
1908
1909/**
1910 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1911 * @returns true if used.
1912 * @returns false if not used.
1913 * @param pVM The VM handle.
1914 */
1915VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1916{
1917 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
1918}
1919
1920
1921/**
1922 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1923 * @returns true if used.
1924 * @returns false if not used.
1925 * @param pVM The VM handle.
1926 */
1927VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1928{
1929 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
1930}
1931
1932#ifndef IN_RING3
1933
1934/**
1935 * Lazily sync in the FPU/XMM state
1936 *
1937 * @returns VBox status code.
1938 * @param pVCpu VMCPU handle
1939 */
1940VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
1941{
1942 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
1943}
1944
1945#endif /* !IN_RING3 */
1946
1947/**
1948 * Checks if we activated the FPU/XMM state of the guest OS
1949 * @returns true if we did.
1950 * @returns false if not.
1951 * @param pVCpu The VMCPU handle.
1952 */
1953VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
1954{
1955 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
1956}
1957
1958
1959/**
1960 * Deactivate the FPU/XMM state of the guest OS
1961 * @param pVCpu The VMCPU handle.
1962 */
1963VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
1964{
1965 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
1966}
1967
1968
1969/**
1970 * Checks if the guest debug state is active
1971 *
1972 * @returns boolean
1973 * @param pVCpu The VMCPU handle.
1974 */
1975VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
1976{
1977 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
1978}
1979
1980
1981/**
1982 * Mark the guest's debug state as inactive
1983 *
1984 * @param pVCpu The VMCPU handle.
1986 */
1987VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
1988{
1989 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1990}
1991
1992
1993/**
1994 * Checks if the hidden selector registers are valid
1995 * @returns true if they are.
1996 * @returns false if not.
1997 * @param pVM The VM handle.
1998 */
1999VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
2000{
2001 return HWACCMIsEnabled(pVM);
2002}
2003
2004
2005
2006/**
2007 * Get the current privilege level of the guest.
2008 *
2009 * @returns cpl
2010 * @param pVCpu The VMCPU handle.
2011 * @param pCtxCore The trap register frame (context core).
2012 */
2013VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2014{
2015 uint32_t cpl;
2016
2017 if (CPUMAreHiddenSelRegsValid(pVCpu->CTX_SUFF(pVM)))
2018 {
2019 /*
2020 * The hidden CS.DPL register is always equal to the CPL; it is
2021 * not affected by loading a conforming code segment.
2022 *
2023 * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
2024 * at SS. (ACP2 regression during install after a far call to ring 2)
2025 */
2026 if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2027 cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
2028 else
2029 cpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2030 }
2031 else if (RT_LIKELY(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2032 {
2033 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
2034 {
2035 /*
2036 * The SS RPL is always equal to the CPL, while the CS RPL
2037 * isn't necessarily equal if the segment is conforming.
2038 * See section 4.11.1 in the AMD manual.
2039 */
2040 cpl = (pCtxCore->ss & X86_SEL_RPL);
2041#ifndef IN_RING0
2042 if (cpl == 1)
2043 cpl = 0;
2044#endif
2045 }
2046 else
2047 cpl = 3;
2048 }
2049 else
2050 cpl = 0; /* real mode; cpl is zero */
2051
2052 return cpl;
2053}
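
/*
 * Illustrative sketch, not part of the original file: a guard that permits
 * an emulated privileged operation only when the guest runs at CPL 0.
 */
static int exampleCheckRing0(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    if (CPUMGetGuestCPL(pVCpu, pCtxCore) != 0)
        return VERR_ACCESS_DENIED;
    return VINF_SUCCESS;
}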
2054
2055
2056/**
2057 * Gets the current guest CPU mode.
2058 *
2059 * If paging mode is what you need, check out PGMGetGuestMode().
2060 *
2061 * @returns The CPU mode.
2062 * @param pVCpu The VMCPU handle.
2063 */
2064VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2065{
2066 CPUMMODE enmMode;
2067 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2068 enmMode = CPUMMODE_REAL;
2069 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2070 enmMode = CPUMMODE_PROTECTED;
2071 else
2072 enmMode = CPUMMODE_LONG;
2073
2074 return enmMode;
2075}
2076
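/*
 * Illustrative sketch, not part of the original file: dispatching on the
 * guest CPU mode derived from CR0.PE and EFER.LMA above.
 */
static const char *exampleModeName(PVMCPU pVCpu)
{
    switch (CPUMGetGuestMode(pVCpu))
    {
        case CPUMMODE_REAL:      return "real";
        case CPUMMODE_PROTECTED: return "protected";
        case CPUMMODE_LONG:      return "long";
        default:                 return "invalid";
    }
}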