VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@16921

Last change on this file since 16921 was 16859, checked in by vboxsync, 16 years ago

Load hypervisor CR3 from CPUM (instead of hardcoded fixups in the switchers). Dangerous change. Watch for regressions.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 57.0 KB
1/* $Id: CPUMAllRegs.cpp 16859 2009-02-17 16:19:51Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_CPUM
27#include <VBox/cpum.h>
28#include <VBox/patm.h>
29#include <VBox/dbgf.h>
30#include <VBox/mm.h>
31#include "CPUMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/err.h>
34#include <VBox/dis.h>
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#ifdef IN_RING3
39#include <iprt/thread.h>
40#endif
41
42/** Disable stack frame pointer generation here. */
43#if defined(_MSC_VER) && !defined(DEBUG)
44# pragma optimize("y", off)
45#endif
46
47
48/**
49 * Sets or resets an alternative hypervisor context core.
50 *
51 * This is called when we get a hypervisor trap, to switch the context
52 * core with the trap frame on the stack. It is called again to reset
53 * back to the default context core when resuming hypervisor execution.
54 *
55 * @param pVM The VM handle.
56 * @param pCtxCore Pointer to the alternative context core or NULL
57 * to go back to the default context core.
58 */
59VMMDECL(void) CPUMHyperSetCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
60{
61 LogFlow(("CPUMHyperSetCtxCore: %p/%p/%p -> %p\n", pVM->cpum.s.pHyperCoreR3, pVM->cpum.s.pHyperCoreR0, pVM->cpum.s.pHyperCoreRC, pCtxCore));
62 if (!pCtxCore)
63 {
64 pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
65 pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
66 pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
67 pVM->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))VM_RC_ADDR(pVM, pCtxCore);
68 }
69 else
70 {
71 pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
72 pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
73 pVM->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
74 }
75}
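/* Illustrative usage (editor's sketch, not part of the original file): a trap
 * handler would switch to the trap frame on entry and restore the default
 * core before resuming, roughly like this (pTrapCtxCore is a hypothetical
 * pointer to a CPUMCTXCORE overlaid on the trap frame):
 *
 *     CPUMHyperSetCtxCore(pVM, pTrapCtxCore);  // use the trap frame
 *     // ... inspect/adjust state, e.g. CPUMGetHyperEIP(pVM) ...
 *     CPUMHyperSetCtxCore(pVM, NULL);          // back to the default core
 */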
76
77
78/**
79 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
80 * This is only for reading in order to save a few calls.
81 *
82 * @param pVM Handle to the virtual machine.
83 */
84VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVM pVM)
85{
86 return pVM->cpum.s.CTX_SUFF(pHyperCore);
87}
88
89
90/**
91 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
92 *
93 * @returns VBox status code.
94 * @param pVM Handle to the virtual machine.
95 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
96 *
97 * @deprecated This does *not* (and never has) give the right picture of the
98 * hypervisor register state. With CPUMHyperSetCtxCore() this is
99 * getting much worse. So, use the individual functions for getting
100 * and esp. setting the hypervisor registers.
101 */
102VMMDECL(int) CPUMQueryHyperCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
103{
104 *ppCtx = &pVM->cpum.s.Hyper;
105 return VINF_SUCCESS;
106}
107
108
109VMMDECL(void) CPUMSetHyperGDTR(PVM pVM, uint32_t addr, uint16_t limit)
110{
111 pVM->cpum.s.Hyper.gdtr.cbGdt = limit;
112 pVM->cpum.s.Hyper.gdtr.pGdt = addr;
113 pVM->cpum.s.Hyper.gdtrPadding = 0;
114}
115
116
117VMMDECL(void) CPUMSetHyperIDTR(PVM pVM, uint32_t addr, uint16_t limit)
118{
119 pVM->cpum.s.Hyper.idtr.cbIdt = limit;
120 pVM->cpum.s.Hyper.idtr.pIdt = addr;
121 pVM->cpum.s.Hyper.idtrPadding = 0;
122}
123
124
125VMMDECL(void) CPUMSetHyperCR3(PVM pVM, uint32_t cr3)
126{
127 pVM->cpum.s.Hyper.cr3 = cr3;
128
129#ifdef IN_RC
130 /* Update the current CR3. */
131 ASMSetCR3(cr3);
132#endif
133}
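/* Illustrative note (editor's sketch, not part of the original file): per the
 * commit message above, the switchers now pick the hypervisor CR3 up from
 * CPUM instead of relying on hardcoded fixups, so ring-3 setup code can
 * simply do:
 *
 *     CPUMSetHyperCR3(pVM, u32HyperCR3);  // u32HyperCR3: hypothetical
 *                                         // page-map root chosen by PGM
 *
 * and in raw-mode context (IN_RC) the write also hits the real CR3 at once.
 */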
134
135VMMDECL(uint32_t) CPUMGetHyperCR3(PVM pVM)
136{
137 return pVM->cpum.s.Hyper.cr3;
138}
139
140
141VMMDECL(void) CPUMSetHyperCS(PVM pVM, RTSEL SelCS)
142{
143 pVM->cpum.s.CTX_SUFF(pHyperCore)->cs = SelCS;
144}
145
146
147VMMDECL(void) CPUMSetHyperDS(PVM pVM, RTSEL SelDS)
148{
149 pVM->cpum.s.CTX_SUFF(pHyperCore)->ds = SelDS;
150}
151
152
153VMMDECL(void) CPUMSetHyperES(PVM pVM, RTSEL SelES)
154{
155 pVM->cpum.s.CTX_SUFF(pHyperCore)->es = SelES;
156}
157
158
159VMMDECL(void) CPUMSetHyperFS(PVM pVM, RTSEL SelFS)
160{
161 pVM->cpum.s.CTX_SUFF(pHyperCore)->fs = SelFS;
162}
163
164
165VMMDECL(void) CPUMSetHyperGS(PVM pVM, RTSEL SelGS)
166{
167 pVM->cpum.s.CTX_SUFF(pHyperCore)->gs = SelGS;
168}
169
170
171VMMDECL(void) CPUMSetHyperSS(PVM pVM, RTSEL SelSS)
172{
173 pVM->cpum.s.CTX_SUFF(pHyperCore)->ss = SelSS;
174}
175
176
177VMMDECL(void) CPUMSetHyperESP(PVM pVM, uint32_t u32ESP)
178{
179 pVM->cpum.s.CTX_SUFF(pHyperCore)->esp = u32ESP;
180}
181
182
183VMMDECL(int) CPUMSetHyperEFlags(PVM pVM, uint32_t Efl)
184{
185 pVM->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32 = Efl;
186 return VINF_SUCCESS;
187}
188
189
190VMMDECL(void) CPUMSetHyperEIP(PVM pVM, uint32_t u32EIP)
191{
192 pVM->cpum.s.CTX_SUFF(pHyperCore)->eip = u32EIP;
193}
194
195
196VMMDECL(void) CPUMSetHyperTR(PVM pVM, RTSEL SelTR)
197{
198 pVM->cpum.s.Hyper.tr = SelTR;
199}
200
201
202VMMDECL(void) CPUMSetHyperLDTR(PVM pVM, RTSEL SelLDTR)
203{
204 pVM->cpum.s.Hyper.ldtr = SelLDTR;
205}
206
207
208VMMDECL(void) CPUMSetHyperDR0(PVM pVM, RTGCUINTREG uDr0)
209{
210 pVM->cpum.s.Hyper.dr[0] = uDr0;
211 /** @todo in GC we must load it! */
212}
213
214
215VMMDECL(void) CPUMSetHyperDR1(PVM pVM, RTGCUINTREG uDr1)
216{
217 pVM->cpum.s.Hyper.dr[1] = uDr1;
218 /** @todo in GC we must load it! */
219}
220
221
222VMMDECL(void) CPUMSetHyperDR2(PVM pVM, RTGCUINTREG uDr2)
223{
224 pVM->cpum.s.Hyper.dr[2] = uDr2;
225 /** @todo in GC we must load it! */
226}
227
228
229VMMDECL(void) CPUMSetHyperDR3(PVM pVM, RTGCUINTREG uDr3)
230{
231 pVM->cpum.s.Hyper.dr[3] = uDr3;
232 /** @todo in GC we must load it! */
233}
234
235
236VMMDECL(void) CPUMSetHyperDR6(PVM pVM, RTGCUINTREG uDr6)
237{
238 pVM->cpum.s.Hyper.dr[6] = uDr6;
239 /** @todo in GC we must load it! */
240}
241
242
243VMMDECL(void) CPUMSetHyperDR7(PVM pVM, RTGCUINTREG uDr7)
244{
245 pVM->cpum.s.Hyper.dr[7] = uDr7;
246 /** @todo in GC we must load it! */
247}
248
249
250VMMDECL(RTSEL) CPUMGetHyperCS(PVM pVM)
251{
252 return pVM->cpum.s.CTX_SUFF(pHyperCore)->cs;
253}
254
255
256VMMDECL(RTSEL) CPUMGetHyperDS(PVM pVM)
257{
258 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ds;
259}
260
261
262VMMDECL(RTSEL) CPUMGetHyperES(PVM pVM)
263{
264 return pVM->cpum.s.CTX_SUFF(pHyperCore)->es;
265}
266
267
268VMMDECL(RTSEL) CPUMGetHyperFS(PVM pVM)
269{
270 return pVM->cpum.s.CTX_SUFF(pHyperCore)->fs;
271}
272
273
274VMMDECL(RTSEL) CPUMGetHyperGS(PVM pVM)
275{
276 return pVM->cpum.s.CTX_SUFF(pHyperCore)->gs;
277}
278
279
280VMMDECL(RTSEL) CPUMGetHyperSS(PVM pVM)
281{
282 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ss;
283}
284
285
286VMMDECL(uint32_t) CPUMGetHyperEAX(PVM pVM)
287{
288 return pVM->cpum.s.CTX_SUFF(pHyperCore)->eax;
289}
290
291
292VMMDECL(uint32_t) CPUMGetHyperEBX(PVM pVM)
293{
294 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ebx;
295}
296
297
298VMMDECL(uint32_t) CPUMGetHyperECX(PVM pVM)
299{
300 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ecx;
301}
302
303
304VMMDECL(uint32_t) CPUMGetHyperEDX(PVM pVM)
305{
306 return pVM->cpum.s.CTX_SUFF(pHyperCore)->edx;
307}
308
309
310VMMDECL(uint32_t) CPUMGetHyperESI(PVM pVM)
311{
312 return pVM->cpum.s.CTX_SUFF(pHyperCore)->esi;
313}
314
315
316VMMDECL(uint32_t) CPUMGetHyperEDI(PVM pVM)
317{
318 return pVM->cpum.s.CTX_SUFF(pHyperCore)->edi;
319}
320
321
322VMMDECL(uint32_t) CPUMGetHyperEBP(PVM pVM)
323{
324 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ebp;
325}
326
327
328VMMDECL(uint32_t) CPUMGetHyperESP(PVM pVM)
329{
330 return pVM->cpum.s.CTX_SUFF(pHyperCore)->esp;
331}
332
333
334VMMDECL(uint32_t) CPUMGetHyperEFlags(PVM pVM)
335{
336 return pVM->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32;
337}
338
339
340VMMDECL(uint32_t) CPUMGetHyperEIP(PVM pVM)
341{
342 return pVM->cpum.s.CTX_SUFF(pHyperCore)->eip;
343}
344
345
346VMMDECL(uint64_t) CPUMGetHyperRIP(PVM pVM)
347{
348 return pVM->cpum.s.CTX_SUFF(pHyperCore)->rip;
349}
350
351
352VMMDECL(uint32_t) CPUMGetHyperIDTR(PVM pVM, uint16_t *pcbLimit)
353{
354 if (pcbLimit)
355 *pcbLimit = pVM->cpum.s.Hyper.idtr.cbIdt;
356 return pVM->cpum.s.Hyper.idtr.pIdt;
357}
358
359
360VMMDECL(uint32_t) CPUMGetHyperGDTR(PVM pVM, uint16_t *pcbLimit)
361{
362 if (pcbLimit)
363 *pcbLimit = pVM->cpum.s.Hyper.gdtr.cbGdt;
364 return pVM->cpum.s.Hyper.gdtr.pGdt;
365}
366
367
368VMMDECL(RTSEL) CPUMGetHyperLDTR(PVM pVM)
369{
370 return pVM->cpum.s.Hyper.ldtr;
371}
372
373
374VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVM pVM)
375{
376 return pVM->cpum.s.Hyper.dr[0];
377}
378
379
380VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVM pVM)
381{
382 return pVM->cpum.s.Hyper.dr[1];
383}
384
385
386VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVM pVM)
387{
388 return pVM->cpum.s.Hyper.dr[2];
389}
390
391
392VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVM pVM)
393{
394 return pVM->cpum.s.Hyper.dr[3];
395}
396
397
398VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVM pVM)
399{
400 return pVM->cpum.s.Hyper.dr[6];
401}
402
403
404VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVM pVM)
405{
406 return pVM->cpum.s.Hyper.dr[7];
407}
408
409
410/**
411 * Gets the pointer to the internal CPUMCTXCORE structure.
412 * This is only for reading in order to save a few calls.
413 *
414 * @param pVM Handle to the virtual machine.
415 */
416VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVM pVM)
417{
418 VM_ASSERT_EMT(pVM);
419 return CPUMCTX2CORE(&pVM->aCpus[VMMGetCpuId(pVM)].cpum.s.Guest);
420}
421
422/**
423 * Gets the pointer to the internal CPUMCTXCORE structure.
424 * This is only for reading in order to save a few calls.
425 *
426 * @param pVM Handle to the virtual machine.
427 */
428VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCoreEx(PVM pVM, PVMCPU pVCpu)
429{
430 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
431}
432
433
434/**
435 * Sets the guest context core registers.
436 *
437 * @param pVM Handle to the virtual machine.
438 * @param pCtxCore The new context core values.
439 */
440VMMDECL(void) CPUMSetGuestCtxCore(PVM pVM, PCCPUMCTXCORE pCtxCore)
441{
442 /** @todo #1410 requires selectors to be checked. (huh? 1410?) */
443
444 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVM->aCpus[VMMGetCpuId(pVM)].cpum.s.Guest);
445 *pCtxCoreDst = *pCtxCore;
446
447 /* Mask away invalid parts of the cpu context. */
448 if (!CPUMIsGuestInLongMode(pVM))
449 {
450 uint64_t u64Mask = UINT64_C(0xffffffff);
451
452 pCtxCoreDst->rip &= u64Mask;
453 pCtxCoreDst->rax &= u64Mask;
454 pCtxCoreDst->rbx &= u64Mask;
455 pCtxCoreDst->rcx &= u64Mask;
456 pCtxCoreDst->rdx &= u64Mask;
457 pCtxCoreDst->rsi &= u64Mask;
458 pCtxCoreDst->rdi &= u64Mask;
459 pCtxCoreDst->rbp &= u64Mask;
460 pCtxCoreDst->rsp &= u64Mask;
461 pCtxCoreDst->rflags.u &= u64Mask;
462
463 pCtxCoreDst->r8 = 0;
464 pCtxCoreDst->r9 = 0;
465 pCtxCoreDst->r10 = 0;
466 pCtxCoreDst->r11 = 0;
467 pCtxCoreDst->r12 = 0;
468 pCtxCoreDst->r13 = 0;
469 pCtxCoreDst->r14 = 0;
470 pCtxCoreDst->r15 = 0;
471 }
472}
473
474
475/**
476 * Queries the pointer to the internal CPUMCTX structure
477 *
478 * @returns The CPUMCTX pointer.
479 * @param pVM Handle to the virtual machine.
480 */
481VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVM pVM)
482{
483 return &pVM->aCpus[VMMGetCpuId(pVM)].cpum.s.Guest;
484}
485
486static PCPUMCPU cpumGetCpumCpu(PVM pVM)
487{
488 RTCPUID idCpu = VMMGetCpuId(pVM);
489
490 return &pVM->aCpus[idCpu].cpum.s;
491}
492
493VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtrEx(PVM pVM, PVMCPU pVCpu)
494{
495 Assert(pVCpu->idCpu < pVM->cCPUs);
496 return &pVCpu->cpum.s.Guest;
497}
498
499VMMDECL(int) CPUMSetGuestGDTR(PVM pVM, uint32_t addr, uint16_t limit)
500{
501 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
502
503 pCpumCpu->Guest.gdtr.cbGdt = limit;
504 pCpumCpu->Guest.gdtr.pGdt = addr;
505 pCpumCpu->fChanged |= CPUM_CHANGED_GDTR;
506 return VINF_SUCCESS;
507}
508
509VMMDECL(int) CPUMSetGuestIDTR(PVM pVM, uint32_t addr, uint16_t limit)
510{
511 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
512
513 pCpumCpu->Guest.idtr.cbIdt = limit;
514 pCpumCpu->Guest.idtr.pIdt = addr;
515 pCpumCpu->fChanged |= CPUM_CHANGED_IDTR;
516 return VINF_SUCCESS;
517}
518
519VMMDECL(int) CPUMSetGuestTR(PVM pVM, uint16_t tr)
520{
521 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
522
523 pCpumCpu->Guest.tr = tr;
524 pCpumCpu->fChanged |= CPUM_CHANGED_TR;
525 return VINF_SUCCESS;
526}
527
528VMMDECL(int) CPUMSetGuestLDTR(PVM pVM, uint16_t ldtr)
529{
530 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
531
532 pCpumCpu->Guest.ldtr = ldtr;
533 pCpumCpu->fChanged |= CPUM_CHANGED_LDTR;
534 return VINF_SUCCESS;
535}
536
537
538/**
539 * Set the guest CR0.
540 *
541 * When called in GC, the hyper CR0 may be updated if that is
542 * required. The caller only has to take special action if AM,
543 * WP, PG or PE changes.
544 *
545 * @returns VINF_SUCCESS (consider it void).
546 * @param pVM Pointer to the shared VM structure.
547 * @param cr0 The new CR0 value.
548 */
549VMMDECL(int) CPUMSetGuestCR0(PVM pVM, uint64_t cr0)
550{
551 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
552
553#ifdef IN_RC
554 /*
555 * Check if we need to change hypervisor CR0 because
556 * of math stuff.
557 */
558 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
559 != (pCpumCpu->Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
560 {
561 if (!(pCpumCpu->fUseFlags & CPUM_USED_FPU))
562 {
563 /*
564 * We haven't saved the host FPU state yet, so TS and MT are both set
565 * and EM should be reflecting the guest EM (it always does this).
566 */
567 if ((cr0 & X86_CR0_EM) != (pCpumCpu->Guest.cr0 & X86_CR0_EM))
568 {
569 uint32_t HyperCR0 = ASMGetCR0();
570 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
571 AssertMsg((HyperCR0 & X86_CR0_EM) == (pCpumCpu->Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
572 HyperCR0 &= ~X86_CR0_EM;
573 HyperCR0 |= cr0 & X86_CR0_EM;
574 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
575 ASMSetCR0(HyperCR0);
576 }
577# ifdef VBOX_STRICT
578 else
579 {
580 uint32_t HyperCR0 = ASMGetCR0();
581 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
582 AssertMsg((HyperCR0 & X86_CR0_EM) == (pCpumCpu->Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
583 }
584# endif
585 }
586 else
587 {
588 /*
589 * Already saved the state, so we're just mirroring
590 * the guest flags.
591 */
592 uint32_t HyperCR0 = ASMGetCR0();
593 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
594 == (pCpumCpu->Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
595 ("%#x %#x\n", HyperCR0, pCpumCpu->Guest.cr0));
596 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
597 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
598 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
599 ASMSetCR0(HyperCR0);
600 }
601 }
602#endif /* IN_RC */
603
604 /*
605 * Check for changes causing TLB flushes (for REM).
606 * The caller is responsible for calling PGM when appropriate.
607 */
608 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
609 != (pCpumCpu->Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
610 pCpumCpu->fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
611 pCpumCpu->fChanged |= CPUM_CHANGED_CR0;
612
613 pCpumCpu->Guest.cr0 = cr0 | X86_CR0_ET;
614 return VINF_SUCCESS;
615}
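/* Illustrative usage (editor's sketch, not part of the original file): the
 * TS/EM/MP handling above only mirrors the guest bits into the hardware CR0
 * once the host FPU state has been saved (CPUM_USED_FPU). A CLTS-style
 * emulation would thus be just:
 *
 *     uint64_t cr0 = CPUMGetGuestCR0(pVM);
 *     CPUMSetGuestCR0(pVM, cr0 & ~X86_CR0_TS);  // hyper CR0 follows in RC
 *
 * Note that CR0.ET is forced to 1 on the way out, matching real hardware.
 */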
616
617
618VMMDECL(int) CPUMSetGuestCR2(PVM pVM, uint64_t cr2)
619{
620 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
621
622 pCpumCpu->Guest.cr2 = cr2;
623 return VINF_SUCCESS;
624}
625
626
627VMMDECL(int) CPUMSetGuestCR3(PVM pVM, uint64_t cr3)
628{
629 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
630
631 pCpumCpu->Guest.cr3 = cr3;
632 pCpumCpu->fChanged |= CPUM_CHANGED_CR3;
633 return VINF_SUCCESS;
634}
635
636
637VMMDECL(int) CPUMSetGuestCR4(PVM pVM, uint64_t cr4)
638{
639 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
640
641 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
642 != (pCpumCpu->Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
643 pCpumCpu->fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
644 pCpumCpu->fChanged |= CPUM_CHANGED_CR4;
645 if (!CPUMSupportsFXSR(pVM))
646 cr4 &= ~X86_CR4_OSFSXR;
647 pCpumCpu->Guest.cr4 = cr4;
648 return VINF_SUCCESS;
649}
650
651
652VMMDECL(int) CPUMSetGuestEFlags(PVM pVM, uint32_t eflags)
653{
654 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
655
656 pCpumCpu->Guest.eflags.u32 = eflags;
657 return VINF_SUCCESS;
658}
659
660
661VMMDECL(int) CPUMSetGuestEIP(PVM pVM, uint32_t eip)
662{
663 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
664
665 pCpumCpu->Guest.eip = eip;
666 return VINF_SUCCESS;
667}
668
669
670VMMDECL(int) CPUMSetGuestEAX(PVM pVM, uint32_t eax)
671{
672 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
673
674 pCpumCpu->Guest.eax = eax;
675 return VINF_SUCCESS;
676}
677
678
679VMMDECL(int) CPUMSetGuestEBX(PVM pVM, uint32_t ebx)
680{
681 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
682
683 pCpumCpu->Guest.ebx = ebx;
684 return VINF_SUCCESS;
685}
686
687
688VMMDECL(int) CPUMSetGuestECX(PVM pVM, uint32_t ecx)
689{
690 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
691
692 pCpumCpu->Guest.ecx = ecx;
693 return VINF_SUCCESS;
694}
695
696
697VMMDECL(int) CPUMSetGuestEDX(PVM pVM, uint32_t edx)
698{
699 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
700
701 pCpumCpu->Guest.edx = edx;
702 return VINF_SUCCESS;
703}
704
705
706VMMDECL(int) CPUMSetGuestESP(PVM pVM, uint32_t esp)
707{
708 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
709
710 pCpumCpu->Guest.esp = esp;
711 return VINF_SUCCESS;
712}
713
714
715VMMDECL(int) CPUMSetGuestEBP(PVM pVM, uint32_t ebp)
716{
717 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
718
719 pCpumCpu->Guest.ebp = ebp;
720 return VINF_SUCCESS;
721}
722
723
724VMMDECL(int) CPUMSetGuestESI(PVM pVM, uint32_t esi)
725{
726 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
727
728 pCpumCpu->Guest.esi = esi;
729 return VINF_SUCCESS;
730}
731
732
733VMMDECL(int) CPUMSetGuestEDI(PVM pVM, uint32_t edi)
734{
735 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
736
737 pCpumCpu->Guest.edi = edi;
738 return VINF_SUCCESS;
739}
740
741
742VMMDECL(int) CPUMSetGuestSS(PVM pVM, uint16_t ss)
743{
744 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
745
746 pCpumCpu->Guest.ss = ss;
747 return VINF_SUCCESS;
748}
749
750
751VMMDECL(int) CPUMSetGuestCS(PVM pVM, uint16_t cs)
752{
753 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
754
755 pCpumCpu->Guest.cs = cs;
756 return VINF_SUCCESS;
757}
758
759
760VMMDECL(int) CPUMSetGuestDS(PVM pVM, uint16_t ds)
761{
762 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
763
764 pCpumCpu->Guest.ds = ds;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestES(PVM pVM, uint16_t es)
770{
771 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
772
773 pCpumCpu->Guest.es = es;
774 return VINF_SUCCESS;
775}
776
777
778VMMDECL(int) CPUMSetGuestFS(PVM pVM, uint16_t fs)
779{
780 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
781
782 pCpumCpu->Guest.fs = fs;
783 return VINF_SUCCESS;
784}
785
786
787VMMDECL(int) CPUMSetGuestGS(PVM pVM, uint16_t gs)
788{
789 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
790
791 pCpumCpu->Guest.gs = gs;
792 return VINF_SUCCESS;
793}
794
795
796VMMDECL(void) CPUMSetGuestEFER(PVM pVM, uint64_t val)
797{
798 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
799
800 pCpumCpu->Guest.msrEFER = val;
801}
802
803
804VMMDECL(uint64_t) CPUMGetGuestMsr(PVM pVM, unsigned idMsr)
805{
806 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
807 uint64_t u64 = 0;
808
809 switch (idMsr)
810 {
811 case MSR_IA32_CR_PAT:
812 u64 = pCpumCpu->Guest.msrPAT;
813 break;
814
815 case MSR_IA32_SYSENTER_CS:
816 u64 = pCpumCpu->Guest.SysEnter.cs;
817 break;
818
819 case MSR_IA32_SYSENTER_EIP:
820 u64 = pCpumCpu->Guest.SysEnter.eip;
821 break;
822
823 case MSR_IA32_SYSENTER_ESP:
824 u64 = pCpumCpu->Guest.SysEnter.esp;
825 break;
826
827 case MSR_K6_EFER:
828 u64 = pCpumCpu->Guest.msrEFER;
829 break;
830
831 case MSR_K8_SF_MASK:
832 u64 = pCpumCpu->Guest.msrSFMASK;
833 break;
834
835 case MSR_K6_STAR:
836 u64 = pCpumCpu->Guest.msrSTAR;
837 break;
838
839 case MSR_K8_LSTAR:
840 u64 = pCpumCpu->Guest.msrLSTAR;
841 break;
842
843 case MSR_K8_CSTAR:
844 u64 = pCpumCpu->Guest.msrCSTAR;
845 break;
846
847 case MSR_K8_KERNEL_GS_BASE:
848 u64 = pCpumCpu->Guest.msrKERNELGSBASE;
849 break;
850
851 case MSR_K8_TSC_AUX:
852 u64 = pCpumCpu->GuestMsr.msr.tscAux;
853 break;
854
855 /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
856 default:
857 AssertFailed();
858 break;
859 }
860 return u64;
861}
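/* Illustrative usage (editor's sketch, not part of the original file): an
 * instruction emulator reading the guest's SYSENTER target:
 *
 *     uint64_t uSysEnterCs  = CPUMGetGuestMsr(pVM, MSR_IA32_SYSENTER_CS);
 *     uint64_t uSysEnterEip = CPUMGetGuestMsr(pVM, MSR_IA32_SYSENTER_EIP);
 *
 * Unsupported MSRs assert and return 0, so callers should validate idMsr
 * before calling.
 */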
862
863VMMDECL(void) CPUMSetGuestMsr(PVM pVM, unsigned idMsr, uint64_t valMsr)
864{
865 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
866
867 /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
868 switch (idMsr)
869 {
870 case MSR_K8_TSC_AUX:
871 pCpumCpu->GuestMsr.msr.tscAux = valMsr;
872 break;
873
874 default:
875 AssertFailed();
876 break;
877 }
878}
879
880VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVM pVM, uint16_t *pcbLimit)
881{
882 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
883
884 if (pcbLimit)
885 *pcbLimit = pCpumCpu->Guest.idtr.cbIdt;
886 return pCpumCpu->Guest.idtr.pIdt;
887}
888
889
890VMMDECL(RTSEL) CPUMGetGuestTR(PVM pVM)
891{
892 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
893
894 return pCpumCpu->Guest.tr;
895}
896
897
898VMMDECL(RTSEL) CPUMGetGuestCS(PVM pVM)
899{
900 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
901
902 return pCpumCpu->Guest.cs;
903}
904
905
906VMMDECL(RTSEL) CPUMGetGuestDS(PVM pVM)
907{
908 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
909
910 return pCpumCpu->Guest.ds;
911}
912
913
914VMMDECL(RTSEL) CPUMGetGuestES(PVM pVM)
915{
916 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
917
918 return pCpumCpu->Guest.es;
919}
920
921
922VMMDECL(RTSEL) CPUMGetGuestFS(PVM pVM)
923{
924 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
925
926 return pCpumCpu->Guest.fs;
927}
928
929
930VMMDECL(RTSEL) CPUMGetGuestGS(PVM pVM)
931{
932 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
933
934 return pCpumCpu->Guest.gs;
935}
936
937
938VMMDECL(RTSEL) CPUMGetGuestSS(PVM pVM)
939{
940 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
941
942 return pCpumCpu->Guest.ss;
943}
944
945
946VMMDECL(RTSEL) CPUMGetGuestLDTR(PVM pVM)
947{
948 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
949
950 return pCpumCpu->Guest.ldtr;
951}
952
953
954VMMDECL(uint64_t) CPUMGetGuestCR0(PVM pVM)
955{
956 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
957
958 return pCpumCpu->Guest.cr0;
959}
960
961
962VMMDECL(uint64_t) CPUMGetGuestCR2(PVM pVM)
963{
964 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
965
966 return pCpumCpu->Guest.cr2;
967}
968
969
970VMMDECL(uint64_t) CPUMGetGuestCR3(PVM pVM)
971{
972 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
973
974 return pCpumCpu->Guest.cr3;
975}
976
977
978VMMDECL(uint64_t) CPUMGetGuestCR4(PVM pVM)
979{
980 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
981
982 return pCpumCpu->Guest.cr4;
983}
984
985
986VMMDECL(void) CPUMGetGuestGDTR(PVM pVM, PVBOXGDTR pGDTR)
987{
988 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
989
990 *pGDTR = pCpumCpu->Guest.gdtr;
991}
992
993
994VMMDECL(uint32_t) CPUMGetGuestEIP(PVM pVM)
995{
996 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
997
998 return pCpumCpu->Guest.eip;
999}
1000
1001
1002VMMDECL(uint64_t) CPUMGetGuestRIP(PVM pVM)
1003{
1004 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1005
1006 return pCpumCpu->Guest.rip;
1007}
1008
1009
1010VMMDECL(uint32_t) CPUMGetGuestEAX(PVM pVM)
1011{
1012 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1013
1014 return pCpumCpu->Guest.eax;
1015}
1016
1017
1018VMMDECL(uint32_t) CPUMGetGuestEBX(PVM pVM)
1019{
1020 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1021
1022 return pCpumCpu->Guest.ebx;
1023}
1024
1025
1026VMMDECL(uint32_t) CPUMGetGuestECX(PVM pVM)
1027{
1028 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1029
1030 return pCpumCpu->Guest.ecx;
1031}
1032
1033
1034VMMDECL(uint32_t) CPUMGetGuestEDX(PVM pVM)
1035{
1036 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1037
1038 return pCpumCpu->Guest.edx;
1039}
1040
1041
1042VMMDECL(uint32_t) CPUMGetGuestESI(PVM pVM)
1043{
1044 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1045
1046 return pCpumCpu->Guest.esi;
1047}
1048
1049
1050VMMDECL(uint32_t) CPUMGetGuestEDI(PVM pVM)
1051{
1052 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1053
1054 return pCpumCpu->Guest.edi;
1055}
1056
1057
1058VMMDECL(uint32_t) CPUMGetGuestESP(PVM pVM)
1059{
1060 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1061
1062 return pCpumCpu->Guest.esp;
1063}
1064
1065
1066VMMDECL(uint32_t) CPUMGetGuestEBP(PVM pVM)
1067{
1068 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1069
1070 return pCpumCpu->Guest.ebp;
1071}
1072
1073
1074VMMDECL(uint32_t) CPUMGetGuestEFlags(PVM pVM)
1075{
1076 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1077
1078 return pCpumCpu->Guest.eflags.u32;
1079}
1080
1081
1082VMMDECL(CPUMSELREGHID *) CPUMGetGuestTRHid(PVM pVM)
1083{
1084 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1085
1086 return &pCpumCpu->Guest.trHid;
1087}
1088
1089
1090/** @todo crx should be an array. */
1091VMMDECL(int) CPUMGetGuestCRx(PVM pVM, unsigned iReg, uint64_t *pValue)
1092{
1093 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1094
1095 switch (iReg)
1096 {
1097 case USE_REG_CR0:
1098 *pValue = pCpumCpu->Guest.cr0;
1099 break;
1100 case USE_REG_CR2:
1101 *pValue = pCpumCpu->Guest.cr2;
1102 break;
1103 case USE_REG_CR3:
1104 *pValue = pCpumCpu->Guest.cr3;
1105 break;
1106 case USE_REG_CR4:
1107 *pValue = pCpumCpu->Guest.cr4;
1108 break;
1109 default:
1110 return VERR_INVALID_PARAMETER;
1111 }
1112 return VINF_SUCCESS;
1113}
1114
1115
1116VMMDECL(uint64_t) CPUMGetGuestDR0(PVM pVM)
1117{
1118 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1119
1120 return pCpumCpu->Guest.dr[0];
1121}
1122
1123
1124VMMDECL(uint64_t) CPUMGetGuestDR1(PVM pVM)
1125{
1126 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1127
1128 return pCpumCpu->Guest.dr[1];
1129}
1130
1131
1132VMMDECL(uint64_t) CPUMGetGuestDR2(PVM pVM)
1133{
1134 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1135
1136 return pCpumCpu->Guest.dr[2];
1137}
1138
1139
1140VMMDECL(uint64_t) CPUMGetGuestDR3(PVM pVM)
1141{
1142 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1143
1144 return pCpumCpu->Guest.dr[3];
1145}
1146
1147
1148VMMDECL(uint64_t) CPUMGetGuestDR6(PVM pVM)
1149{
1150 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1151
1152 return pCpumCpu->Guest.dr[6];
1153}
1154
1155
1156VMMDECL(uint64_t) CPUMGetGuestDR7(PVM pVM)
1157{
1158 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1159
1160 return pCpumCpu->Guest.dr[7];
1161}
1162
1163
1164VMMDECL(int) CPUMGetGuestDRx(PVM pVM, uint32_t iReg, uint64_t *pValue)
1165{
1166 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1167
1168 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1169 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1170 if (iReg == 4 || iReg == 5)
1171 iReg += 2;
1172 *pValue = pCpumCpu->Guest.dr[iReg];
1173 return VINF_SUCCESS;
1174}
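/* Illustrative note (editor's sketch, not part of the original file): because
 * of the DR4/DR6 and DR5/DR7 aliasing above, these two calls read the same
 * register:
 *
 *     uint64_t u1, u2;
 *     CPUMGetGuestDRx(pVM, 4, &u1);  // DR4 aliases DR6
 *     CPUMGetGuestDRx(pVM, 6, &u2);  // u1 == u2
 */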
1175
1176
1177VMMDECL(uint64_t) CPUMGetGuestEFER(PVM pVM)
1178{
1179 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1180
1181 return pCpumCpu->Guest.msrEFER;
1182}
1183
1184
1185/**
1186 * Gets a CpuId leaf.
1187 *
1188 * @param pVM The VM handle.
1189 * @param iLeaf The CPUID leaf to get.
1190 * @param pEax Where to store the EAX value.
1191 * @param pEbx Where to store the EBX value.
1192 * @param pEcx Where to store the ECX value.
1193 * @param pEdx Where to store the EDX value.
1194 */
1195VMMDECL(void) CPUMGetGuestCpuId(PVM pVM, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1196{
1197 PCCPUMCPUID pCpuId;
1198 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1199 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1200 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1201 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1202 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1203 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1204 else
1205 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1206
1207 *pEax = pCpuId->eax;
1208 *pEbx = pCpuId->ebx;
1209 *pEcx = pCpuId->ecx;
1210 *pEdx = pCpuId->edx;
1211 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1212}
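/* Illustrative usage (editor's sketch, not part of the original file):
 * testing a feature bit through the virtualized CPUID leaves:
 *
 *     uint32_t uEax, uEbx, uEcx, uEdx;
 *     CPUMGetGuestCpuId(pVM, 1, &uEax, &uEbx, &uEcx, &uEdx);
 *     bool fGuestPAE = !!(uEdx & X86_CPUID_FEATURE_EDX_PAE);
 *
 * Leaves outside the standard/extended/centaur ranges fall back to
 * GuestCpuIdDef, so the call always produces defined values.
 */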
1213
1214
1215/**
1216 * Gets a pointer to the array of standard CPUID leafs.
1217 *
1218 * CPUMGetGuestCpuIdStdMax() gives the size of the array.
1219 *
1220 * @returns Pointer to the standard CPUID leafs (read-only).
1221 * @param pVM The VM handle.
1222 * @remark Intended for PATM.
1223 */
1224VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdStdRCPtr(PVM pVM)
1225{
1226 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
1227}
1228
1229
1230/**
1231 * Gets a pointer to the array of extended CPUID leafs.
1232 *
1233 * CPUMGetGuestCpuIdExtMax() gives the size of the array.
1234 *
1235 * @returns Pointer to the extended CPUID leafs (read-only).
1236 * @param pVM The VM handle.
1237 * @remark Intended for PATM.
1238 */
1239VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdExtRCPtr(PVM pVM)
1240{
1241 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
1242}
1243
1244
1245/**
1246 * Gets a pointer to the array of centaur CPUID leafs.
1247 *
1248 * CPUMGetGuestCpuIdCentaurMax() gives the size of the array.
1249 *
1250 * @returns Pointer to the centaur CPUID leafs (read-only).
1251 * @param pVM The VM handle.
1252 * @remark Intended for PATM.
1253 */
1254VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdCentaurRCPtr(PVM pVM)
1255{
1256 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
1257}
1258
1259
1260/**
1261 * Gets a pointer to the default CPUID leaf.
1262 *
1263 * @returns Pointer to the default CPUID leaf (read-only).
1264 * @param pVM The VM handle.
1265 * @remark Intended for PATM.
1266 */
1267VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdDefRCPtr(PVM pVM)
1268{
1269 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
1270}
1271
1272
1273/**
1274 * Gets the number of standard CPUID leafs.
1275 *
1276 * @returns Number of leafs.
1277 * @param pVM The VM handle.
1278 * @remark Intended for PATM.
1279 */
1280VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1281{
1282 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1283}
1284
1285
1286/**
1287 * Gets the number of extended CPUID leafs.
1288 *
1289 * @returns Number of leafs.
1290 * @param pVM The VM handle.
1291 * @remark Intended for PATM.
1292 */
1293VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1294{
1295 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1296}
1297
1298
1299/**
1300 * Gets the number of centaur CPUID leafs.
1301 *
1302 * @returns Number of leafs.
1303 * @param pVM The VM handle.
1304 * @remark Intended for PATM.
1305 */
1306VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1307{
1308 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1309}
1310
1311
1312/**
1313 * Sets a CPUID feature bit.
1314 *
1315 * @param pVM The VM Handle.
1316 * @param enmFeature The feature to set.
1317 */
1318VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1319{
1320 switch (enmFeature)
1321 {
1322 /*
1323 * Set the APIC bit in both feature masks.
1324 */
1325 case CPUMCPUIDFEATURE_APIC:
1326 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1327 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1328 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1329 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1330 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1331 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1332 break;
1333
1334 /*
1335 * Set the x2APIC bit in the standard feature mask.
1336 */
1337 case CPUMCPUIDFEATURE_X2APIC:
1338 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1339 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1340 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1341 break;
1342
1343 /*
1344 * Set the sysenter/sysexit bit in the standard feature mask.
1345 * Assumes the caller knows what it's doing! (host must support these)
1346 */
1347 case CPUMCPUIDFEATURE_SEP:
1348 {
1349 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1350 {
1351 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1352 return;
1353 }
1354
1355 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1356 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1357 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1358 break;
1359 }
1360
1361 /*
1362 * Set the syscall/sysret bit in the extended feature mask.
1363 * Assumes the caller knows what it's doing! (host must support these)
1364 */
1365 case CPUMCPUIDFEATURE_SYSCALL:
1366 {
1367 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1368 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1369 {
1370#if HC_ARCH_BITS == 32
1371 /* X86_CPUID_AMD_FEATURE_EDX_SEP does not seem to be set in 32-bit mode,
1372 * even when the CPU is capable of doing so in 64-bit mode.
1373 */
1374 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1375 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1376 || !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1377#endif
1378 {
1379 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1380 return;
1381 }
1382 }
1383 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1384 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1385 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1386 break;
1387 }
1388
1389 /*
1390 * Set the PAE bit in both feature masks.
1391 * Assumes the caller knows what it's doing! (host must support these)
1392 */
1393 case CPUMCPUIDFEATURE_PAE:
1394 {
1395 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1396 {
1397 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1398 return;
1399 }
1400
1401 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1402 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1403 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1404 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1405 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1406 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1407 break;
1408 }
1409
1410 /*
1411 * Set the LONG MODE bit in the extended feature mask.
1412 * Assumes the caller knows what it's doing! (host must support these)
1413 */
1414 case CPUMCPUIDFEATURE_LONG_MODE:
1415 {
1416 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1417 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1418 {
1419 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1420 return;
1421 }
1422
1423 /* Valid for both Intel and AMD. */
1424 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1425 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1426 break;
1427 }
1428
1429 /*
1430 * Set the NXE bit in the extended feature mask.
1431 * Assumes the caller knows what it's doing! (host must support these)
1432 */
1433 case CPUMCPUIDFEATURE_NXE:
1434 {
1435 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1436 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1437 {
1438 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1439 return;
1440 }
1441
1442 /* Valid for both Intel and AMD. */
1443 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1444 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1445 break;
1446 }
1447
1448 case CPUMCPUIDFEATURE_LAHF:
1449 {
1450 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1451 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1452 {
1453 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1454 return;
1455 }
1456
1457 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1458 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1459 break;
1460 }
1461
1462 case CPUMCPUIDFEATURE_PAT:
1463 {
1464 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1465 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1466 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1467 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1468 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1469 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAT\n"));
1470 break;
1471 }
1472
1473 case CPUMCPUIDFEATURE_RDTSCP:
1474 {
1475 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1476 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP))
1477 {
1478 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1479 return;
1480 }
1481
1482 /* Valid for AMD only (for now). */
1483 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
1484 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1485 break;
1486 }
1487
1488 default:
1489 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1490 break;
1491 }
1492 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1493
1494 pCpumCpu->fChanged |= CPUM_CHANGED_CPUID;
1495}
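/* Illustrative usage (editor's sketch, not part of the original file): VM
 * setup code typically mirrors host capabilities into the guest CPUID with
 * the set/clear/query trio defined in this file:
 *
 *     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);    // expose
 *     CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);  // hide
 *     bool fPae = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
 */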
1496
1497
1498/**
1499 * Queries a CPUID feature bit.
1500 *
1501 * @returns boolean for feature presence
1502 * @param pVM The VM Handle.
1503 * @param enmFeature The feature to query.
1504 */
1505VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1506{
1507 switch (enmFeature)
1508 {
1509 case CPUMCPUIDFEATURE_PAE:
1510 {
1511 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1512 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1513 break;
1514 }
1515
1516 case CPUMCPUIDFEATURE_RDTSCP:
1517 {
1518 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1519 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
1520 break;
1521 }
1522
1523 case CPUMCPUIDFEATURE_LONG_MODE:
1524 {
1525 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1526 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
1527 break;
1528 }
1529
1530 default:
1531 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1532 break;
1533 }
1534 return false;
1535}
1536
1537
1538/**
1539 * Clears a CPUID feature bit.
1540 *
1541 * @param pVM The VM Handle.
1542 * @param enmFeature The feature to clear.
1543 */
1544VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1545{
1546 switch (enmFeature)
1547 {
1548 /*
1549 * Set the APIC bit in both feature masks.
1550 */
1551 case CPUMCPUIDFEATURE_APIC:
1552 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1553 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1554 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1555 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1556 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1557 Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n"));
1558 break;
1559
1560 /*
1561 * Clear the x2APIC bit in the standard feature mask.
1562 */
1563 case CPUMCPUIDFEATURE_X2APIC:
1564 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1565 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1566 LogRel(("CPUMClearGuestCpuIdFeature: Disabled x2APIC\n"));
1567 break;
1568
1569 case CPUMCPUIDFEATURE_PAE:
1570 {
1571 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1572 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1573 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1574 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1575 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1576 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1577 break;
1578 }
1579
1580 case CPUMCPUIDFEATURE_PAT:
1581 {
1582 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1583 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1584 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1585 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1586 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1587 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1588 break;
1589 }
1590
1591 case CPUMCPUIDFEATURE_LONG_MODE:
1592 {
1593 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1594 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1595 break;
1596 }
1597
1598 case CPUMCPUIDFEATURE_LAHF:
1599 {
1600 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1601 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1602 break;
1603 }
1604
1605 default:
1606 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1607 break;
1608 }
1609 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1610 pCpumCpu->fChanged |= CPUM_CHANGED_CPUID;
1611}
1612
1613
1614/**
1615 * Gets the CPU vendor
1616 *
1617 * @returns CPU vendor
1618 * @param pVM The VM handle.
1619 */
1620VMMDECL(CPUMCPUVENDOR) CPUMGetCPUVendor(PVM pVM)
1621{
1622 return pVM->cpum.s.enmCPUVendor;
1623}
1624
1625
1626VMMDECL(int) CPUMSetGuestDR0(PVM pVM, uint64_t uDr0)
1627{
1628 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1629
1630 pCpumCpu->Guest.dr[0] = uDr0;
1631 return CPUMRecalcHyperDRx(pVM);
1632}
1633
1634
1635VMMDECL(int) CPUMSetGuestDR1(PVM pVM, uint64_t uDr1)
1636{
1637 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1638
1639 pCpumCpu->Guest.dr[1] = uDr1;
1640 return CPUMRecalcHyperDRx(pVM);
1641}
1642
1643
1644VMMDECL(int) CPUMSetGuestDR2(PVM pVM, uint64_t uDr2)
1645{
1646 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1647
1648 pCpumCpu->Guest.dr[2] = uDr2;
1649 return CPUMRecalcHyperDRx(pVM);
1650}
1651
1652
1653VMMDECL(int) CPUMSetGuestDR3(PVM pVM, uint64_t uDr3)
1654{
1655 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1656
1657 pCpumCpu->Guest.dr[3] = uDr3;
1658 return CPUMRecalcHyperDRx(pVM);
1659}
1660
1661
1662VMMDECL(int) CPUMSetGuestDR6(PVM pVM, uint64_t uDr6)
1663{
1664 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1665
1666 pCpumCpu->Guest.dr[6] = uDr6;
1667 return CPUMRecalcHyperDRx(pVM);
1668}
1669
1670
1671VMMDECL(int) CPUMSetGuestDR7(PVM pVM, uint64_t uDr7)
1672{
1673 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1674
1675 pCpumCpu->Guest.dr[7] = uDr7;
1676 return CPUMRecalcHyperDRx(pVM);
1677}
1678
1679
1680VMMDECL(int) CPUMSetGuestDRx(PVM pVM, uint32_t iReg, uint64_t Value)
1681{
1682 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1683
1684 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1685 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1686 if (iReg == 4 || iReg == 5)
1687 iReg += 2;
1688 pCpumCpu->Guest.dr[iReg] = Value;
1689 return CPUMRecalcHyperDRx(pVM);
1690}
1691
1692
1693/**
1694 * Recalculates the hypervisor DRx register values based on
1695 * current guest registers and DBGF breakpoints.
1696 *
1697 * This is called whenever a guest DRx register is modified and when DBGF
1698 * sets a hardware breakpoint. In guest context this function will reload
1699 * any (hyper) DRx registers which come out with a different value.
1700 *
1701 * @returns VINF_SUCCESS.
1702 * @param pVM The VM handle.
1703 */
1704VMMDECL(int) CPUMRecalcHyperDRx(PVM pVM)
1705{
1706 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1707 /*
1708 * Compare the DR7s first.
1709 *
1710 * We only care about the enabled flags. The GE and LE flags are always
1711 * set and we don't care if the guest doesn't set them. GD is virtualized
1712 * when we dispatch #DB; we never enable it.
1713 */
1714 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1715#ifdef CPUM_VIRTUALIZE_DRX
1716 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVM);
1717#else
1718 const RTGCUINTREG uGstDr7 = 0;
1719#endif
1720 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1721 {
1722 /*
1723 * Ok, something is enabled. Recalc each of the breakpoints.
1724 * Straightforward code, not optimized/minimized in any way.
1725 */
1726 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1727
1728 /* bp 0 */
1729 RTGCUINTREG uNewDr0;
1730 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1731 {
1732 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1733 uNewDr0 = DBGFBpGetDR0(pVM);
1734 }
1735 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1736 {
1737 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1738 uNewDr0 = CPUMGetGuestDR0(pVM);
1739 }
1740 else
1741 uNewDr0 = pVM->cpum.s.Hyper.dr[0];
1742
1743 /* bp 1 */
1744 RTGCUINTREG uNewDr1;
1745 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1746 {
1747 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1748 uNewDr1 = DBGFBpGetDR1(pVM);
1749 }
1750 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1751 {
1752 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1753 uNewDr1 = CPUMGetGuestDR1(pVM);
1754 }
1755 else
1756 uNewDr1 = pVM->cpum.s.Hyper.dr[1];
1757
1758 /* bp 2 */
1759 RTGCUINTREG uNewDr2;
1760 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1761 {
1762 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1763 uNewDr2 = DBGFBpGetDR2(pVM);
1764 }
1765 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1766 {
1767 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1768 uNewDr2 = CPUMGetGuestDR2(pVM);
1769 }
1770 else
1771 uNewDr2 = pVM->cpum.s.Hyper.dr[2];
1772
1773 /* bp 3 */
1774 RTGCUINTREG uNewDr3;
1775 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1776 {
1777 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1778 uNewDr3 = DBGFBpGetDR3(pVM);
1779 }
1780 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1781 {
1782 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1783 uNewDr3 = CPUMGetGuestDR3(pVM);
1784 }
1785 else
1786 uNewDr3 = pVM->cpum.s.Hyper.dr[3];
1787
1788 /*
1789 * Apply the updates.
1790 */
1791#ifdef IN_RC
1792 if (!(pCpumCpu->fUseFlags & CPUM_USE_DEBUG_REGS))
1793 {
1794 /** @todo save host DBx registers. */
1795 }
1796#endif
1797 pCpumCpu->fUseFlags |= CPUM_USE_DEBUG_REGS;
1798 if (uNewDr3 != pVM->cpum.s.Hyper.dr[3])
1799 CPUMSetHyperDR3(pVM, uNewDr3);
1800 if (uNewDr2 != pVM->cpum.s.Hyper.dr[2])
1801 CPUMSetHyperDR2(pVM, uNewDr2);
1802 if (uNewDr1 != pVM->cpum.s.Hyper.dr[1])
1803 CPUMSetHyperDR1(pVM, uNewDr1);
1804 if (uNewDr0 != pVM->cpum.s.Hyper.dr[0])
1805 CPUMSetHyperDR0(pVM, uNewDr0);
1806 if (uNewDr7 != pVM->cpum.s.Hyper.dr[7])
1807 CPUMSetHyperDR7(pVM, uNewDr7);
1808 }
1809 else
1810 {
1811#ifdef IN_RC
1812 if (pCpumCpu->fUseFlags & CPUM_USE_DEBUG_REGS)
1813 {
1814 /** @todo restore host DBx registers. */
1815 }
1816#endif
1817 pCpumCpu->fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1818 }
1819 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1820 pCpumCpu->fUseFlags, pVM->cpum.s.Hyper.dr[0], pVM->cpum.s.Hyper.dr[1],
1821 pVM->cpum.s.Hyper.dr[2], pVM->cpum.s.Hyper.dr[3], pVM->cpum.s.Hyper.dr[6],
1822 pVM->cpum.s.Hyper.dr[7]));
1823
1824 return VINF_SUCCESS;
1825}
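/* Illustrative note (editor's sketch, not part of the original file): in the
 * merge above a DBGF hardware breakpoint always wins over the guest value
 * for the same slot, so a debugger breakpoint survives guest DRx writes:
 *
 *     CPUMSetGuestDR0(pVM, uGuestAddr);  // uGuestAddr: hypothetical address;
 *                                        // triggers CPUMRecalcHyperDRx(),
 *                                        // which keeps the DBGF DR0 while
 *                                        // breakpoint 0 is armed in DR7.
 */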
1826
1827#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
1828
1829/**
1830 * Transforms the guest CPU state to raw-ring mode.
1831 *
1832 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
1833 *
1834 * @returns VBox status. (recompiler failure)
1835 * @param pVM VM handle.
1836 * @param pCtxCore The context core (for trap usage).
1837 * @see @ref pg_raw
1838 */
1839VMMDECL(int) CPUMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
1840{
1841 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1842
1843 Assert(!pVM->cpum.s.fRawEntered);
1844 if (!pCtxCore)
1845 pCtxCore = CPUMCTX2CORE(&pCpumCpu->Guest);
1846
1847 /*
1848 * Are we in Ring-0?
1849 */
1850 if ( pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
1851 && !pCtxCore->eflags.Bits.u1VM)
1852 {
1853 /*
1854 * Enter execution mode.
1855 */
1856 PATMRawEnter(pVM, pCtxCore);
1857
1858 /*
1859 * Set CPL to Ring-1.
1860 */
1861 pCtxCore->ss |= 1;
1862 if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
1863 pCtxCore->cs |= 1;
1864 }
1865 else
1866 {
1867 AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
1868 ("ring-1 code not supported\n"));
1869 /*
1870 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
1871 */
1872 PATMRawEnter(pVM, pCtxCore);
1873 }
1874
1875 /*
1876 * Assert sanity.
1877 */
1878 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
1879 AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
1880 || pCtxCore->eflags.Bits.u1VM,
1881 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1882 Assert((pCpumCpu->Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
1883 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
1884
1885 pVM->cpum.s.fRawEntered = true;
1886 return VINF_SUCCESS;
1887}
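/* Illustrative note (editor's sketch, not part of the original file): the
 * ring compression above simply bumps the RPL bits of ring-0 selectors, e.g.:
 *
 *     // ss = 0x10 (RPL 0)  ->  0x11 (RPL 1) after CPUMRawEnter()
 *     // cs = 0x08 (RPL 0)  ->  0x09 (RPL 1) after CPUMRawEnter()
 *
 * CPUMRawLeave() strips the RPL again so the guest never observes it.
 */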
1888
1889
1890/**
1891 * Transforms the guest CPU state from raw-ring mode to correct values.
1892 *
1893 * This function will change any selector registers with DPL=1 to DPL=0.
1894 *
1895 * @returns Adjusted rc.
1896 * @param pVM VM handle.
1897 * @param rc Raw mode return code
1898 * @param pCtxCore The context core (for trap usage).
1899 * @see @ref pg_raw
1900 */
1901VMMDECL(int) CPUMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rc)
1902{
1903 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1904
1905 /*
1906 * Don't leave if we've already left (in GC).
1907 */
1908 Assert(pVM->cpum.s.fRawEntered);
1909 if (!pVM->cpum.s.fRawEntered)
1910 return rc;
1911 pVM->cpum.s.fRawEntered = false;
1912
1913 PCPUMCTX pCtx = &pCpumCpu->Guest;
1914 if (!pCtxCore)
1915 pCtxCore = CPUMCTX2CORE(pCtx);
1916 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
1917 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
1918 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1919
1920 /*
1921 * Are we executing in raw ring-1?
1922 */
1923 if ( (pCtxCore->ss & X86_SEL_RPL) == 1
1924 && !pCtxCore->eflags.Bits.u1VM)
1925 {
1926 /*
1927 * Leave execution mode.
1928 */
1929 PATMRawLeave(pVM, pCtxCore, rc);
1930 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
1931 /** @todo See what happens if we remove this. */
1932 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1933 pCtxCore->ds &= ~X86_SEL_RPL;
1934 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1935 pCtxCore->es &= ~X86_SEL_RPL;
1936 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1937 pCtxCore->fs &= ~X86_SEL_RPL;
1938 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1939 pCtxCore->gs &= ~X86_SEL_RPL;
1940
1941 /*
1942 * Ring-1 selector => Ring-0.
1943 */
1944 pCtxCore->ss &= ~X86_SEL_RPL;
1945 if ((pCtxCore->cs & X86_SEL_RPL) == 1)
1946 pCtxCore->cs &= ~X86_SEL_RPL;
1947 }
1948 else
1949 {
1950 /*
1951 * PATM is taking care of the IOPL and IF flags for us.
1952 */
1953 PATMRawLeave(pVM, pCtxCore, rc);
1954 if (!pCtxCore->eflags.Bits.u1VM)
1955 {
1956 /** @todo See what happens if we remove this. */
1957 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1958 pCtxCore->ds &= ~X86_SEL_RPL;
1959 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1960 pCtxCore->es &= ~X86_SEL_RPL;
1961 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1962 pCtxCore->fs &= ~X86_SEL_RPL;
1963 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1964 pCtxCore->gs &= ~X86_SEL_RPL;
1965 }
1966 }
1967
1968 return rc;
1969}
1970
1971/**
1972 * Updates the EFLAGS while we're in raw-mode.
1973 *
1974 * @param pVM The VM handle.
1975 * @param pCtxCore The context core.
1976 * @param eflags The new EFLAGS value.
1977 */
1978VMMDECL(void) CPUMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t eflags)
1979{
1980 if (!pVM->cpum.s.fRawEntered)
1981 {
1982 pCtxCore->eflags.u32 = eflags;
1983 return;
1984 }
1985 PATMRawSetEFlags(pVM, pCtxCore, eflags);
1986}
1987
1988#endif /* !IN_RING0 */
1989
1990/**
1991 * Gets the EFLAGS while we're in raw-mode.
1992 *
1993 * @returns The eflags.
1994 * @param pVM The VM handle.
1995 * @param pCtxCore The context core.
1996 */
1997VMMDECL(uint32_t) CPUMRawGetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore)
1998{
1999#ifdef IN_RING0
2000 return pCtxCore->eflags.u32;
2001#else
2002 if (!pVM->cpum.s.fRawEntered)
2003 return pCtxCore->eflags.u32;
2004 return PATMRawGetEFlags(pVM, pCtxCore);
2005#endif
2006}
2007
2008
2009/**
2010 * Gets and resets the changed flags (CPUM_CHANGED_*).
2011 * Only REM should call this function.
2012 *
2013 * @returns The changed flags.
2014 * @param pVM The VM handle.
2015 */
2016VMMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVM pVM)
2017{
2018 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2019
2020 unsigned fFlags = pCpumCpu->fChanged;
2021 pCpumCpu->fChanged = 0;
2022 /** @todo change the switcher to use the fChanged flags. */
2023 if (pCpumCpu->fUseFlags & CPUM_USED_FPU_SINCE_REM)
2024 {
2025 fFlags |= CPUM_CHANGED_FPU_REM;
2026 pCpumCpu->fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
2027 }
2028 return fFlags;
2029}
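/* Illustrative usage (editor's sketch, not part of the original file): REM
 * polls and clears the change mask in a single call and reacts to the bits:
 *
 *     unsigned fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
 *     if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
 *         remFlushTlb();  // remFlushTlb: hypothetical REM helper
 */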
2030
2031
2032/**
2033 * Sets the specified changed flags (CPUM_CHANGED_*).
2034 *
2035 * @param pVM The VM handle.
2036 */
2037VMMDECL(void) CPUMSetChangedFlags(PVM pVM, uint32_t fChangedFlags)
2038{
2039 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2040
2041 pCpumCpu->fChanged |= fChangedFlags;
2042}
2043
2044
2045/**
2046 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
2047 * @returns true if supported.
2048 * @returns false if not supported.
2049 * @param pVM The VM handle.
2050 */
2051VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2052{
2053 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2054}
2055
2056
2057/**
2058 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2059 * @returns true if used.
2060 * @returns false if not used.
2061 * @param pVM The VM handle.
2062 */
2063VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2064{
2065 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2066
2067 return (pCpumCpu->fUseFlags & CPUM_USE_SYSENTER) != 0;
2068}
2069
2070
2071/**
2072 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2073 * @returns true if used.
2074 * @returns false if not used.
2075 * @param pVM The VM handle.
2076 */
2077VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2078{
2079 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2080
2081 return (pCpumCpu->fUseFlags & CPUM_USE_SYSCALL) != 0;
2082}
2083
2084#ifndef IN_RING3
2085
2086/**
2087 * Lazily sync in the FPU/XMM state
2088 *
2089 * @returns VBox status code.
2090 * @param pVM VM handle.
2091 * @param pVCpu VMCPU handle
2092 */
2093VMMDECL(int) CPUMHandleLazyFPU(PVM pVM, PVMCPU pVCpu)
2094{
2095 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2096}
2097
2098#endif /* !IN_RING3 */
2099
2100/**
2101 * Checks if we activated the FPU/XMM state of the guest OS
2102 * @returns true if we did.
2103 * @returns false if not.
2104 * @param pVCpu The VMCPU handle.
2105 */
2106VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2107{
2108 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2109}
2110
2111
2112/**
2113 * Deactivate the FPU/XMM state of the guest OS
2114 * @param pVM The VM handle.
2115 */
2116VMMDECL(void) CPUMDeactivateGuestFPUState(PVM pVM)
2117{
2118 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2119
2120 pCpumCpu->fUseFlags &= ~CPUM_USED_FPU;
2121}
2122
2123
2124/**
2125 * Checks if the guest debug state is active
2126 *
2127 * @returns boolean
2128 * @param pVM VM handle.
2129 */
2130VMMDECL(bool) CPUMIsGuestDebugStateActive(PVM pVM)
2131{
2132 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2133
2134 return (pCpumCpu->fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2135}
2136
2137
2138/**
2139 * Marks the guest's debug state as inactive.
2140 *
2142 * @param pVM VM handle.
2143 */
2144VMMDECL(void) CPUMDeactivateGuestDebugState(PVM pVM)
2145{
2146 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2147
2148 pCpumCpu->fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2149}
2150
2151
2152/**
2153 * Checks if the hidden selector registers are valid
2154 * @returns true if they are.
2155 * @returns false if not.
2156 * @param pVM The VM handle.
2157 */
2158VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
2159{
2160 return !!pVM->cpum.s.fValidHiddenSelRegs; /** @todo change fValidHiddenSelRegs to bool! */
2161}
2162
2163
2164/**
2165 * Marks the hidden selector registers as valid or invalid.
2166 * @param pVM The VM handle.
2167 * @param fValid Valid or not
2168 */
2169VMMDECL(void) CPUMSetHiddenSelRegsValid(PVM pVM, bool fValid)
2170{
2171 pVM->cpum.s.fValidHiddenSelRegs = fValid;
2172}
2173
2174
2175/**
2176 * Get the current privilege level of the guest.
2177 *
2178 * @returns cpl
2179 * @param pVM VM Handle.
2180 * @param pCtxCore Trap register frame.
2181 */
2182VMMDECL(uint32_t) CPUMGetGuestCPL(PVM pVM, PCPUMCTXCORE pCtxCore)
2183{
2184 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2185 uint32_t cpl;
2186
2187 if (CPUMAreHiddenSelRegsValid(pVM))
2188 {
2189 /*
2190 * The hidden CS.DPL register is always equal to the CPL, it is
2191 * not affected by loading a conforming code segment.
2192 *
2193 * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
2194 * at SS. (ACP2 regression during install after a far call to ring 2)
2195 */
2196 if (RT_LIKELY(pCpumCpu->Guest.cr0 & X86_CR0_PE))
2197 cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
2198 else
2199 cpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2200 }
2201 else if (RT_LIKELY(pCpumCpu->Guest.cr0 & X86_CR0_PE))
2202 {
2203 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
2204 {
2205 /*
2206 * The SS RPL is always equal to the CPL, while the CS RPL
2207 * isn't necessarily equal if the segment is conforming.
2208 * See section 4.11.1 in the AMD manual.
2209 */
2210 cpl = (pCtxCore->ss & X86_SEL_RPL);
2211#ifndef IN_RING0
2212 if (cpl == 1)
2213 cpl = 0;
2214#endif
2215 }
2216 else
2217 cpl = 3;
2218 }
2219 else
2220 cpl = 0; /* real mode; cpl is zero */
2221
2222 return cpl;
2223}
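/* Summary of the CPL logic above (editor's note, not part of the original
 * file):
 *
 *     hidden regs valid, CR0.PE=1:  cpl = SS.DPL (AMD-V/VT-x path)
 *     hidden regs valid, CR0.PE=0:  cpl = 0
 *     CR0.PE=1, !V86:               cpl = SS.RPL (raw ring 1 reported as 0)
 *     CR0.PE=1, V86:                cpl = 3
 *     CR0.PE=0 (real mode):         cpl = 0
 */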
2224
2225
2226/**
2227 * Gets the current guest CPU mode.
2228 *
2229 * If paging mode is what you need, check out PGMGetGuestMode().
2230 *
2231 * @returns The CPU mode.
2232 * @param pVM The VM handle.
2233 */
2234VMMDECL(CPUMMODE) CPUMGetGuestMode(PVM pVM)
2235{
2236 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2237
2238 CPUMMODE enmMode;
2239 if (!(pCpumCpu->Guest.cr0 & X86_CR0_PE))
2240 enmMode = CPUMMODE_REAL;
2241 else if (!(pCpumCpu->Guest.msrEFER & MSR_K6_EFER_LMA))
2242 enmMode = CPUMMODE_PROTECTED;
2243 else
2244 enmMode = CPUMMODE_LONG;
2245
2246 return enmMode;
2247}
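/* Illustrative decision table for the mode selection above (editor's note,
 * not part of the original file):
 *
 *     CR0.PE == 0                  ->  CPUMMODE_REAL
 *     CR0.PE == 1, EFER.LMA == 0   ->  CPUMMODE_PROTECTED
 *     CR0.PE == 1, EFER.LMA == 1   ->  CPUMMODE_LONG
 */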
2248