VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@18078

Last change on this file since 18078 was 17106, checked in by vboxsync, 16 years ago

VMM,REM: Removed the single page limitation on the TSS monitoring and going over the interrupt redirection bitmap monitoring.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 57.0 KB
1/* $Id: CPUMAllRegs.cpp 17106 2009-02-25 00:35:15Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_CPUM
27#include <VBox/cpum.h>
28#include <VBox/patm.h>
29#include <VBox/dbgf.h>
30#include <VBox/mm.h>
31#include "CPUMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/err.h>
34#include <VBox/dis.h>
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#ifdef IN_RING3
39#include <iprt/thread.h>
40#endif
41
42/** Disable stack frame pointer generation here. */
43#if defined(_MSC_VER) && !defined(DEBUG)
44# pragma optimize("y", off)
45#endif
46
47
48/**
49 * Sets or resets an alternative hypervisor context core.
50 *
51 * This is called when we get a hypervisor trap and need to switch the
52 * context core to the trap frame on the stack. It is called again to reset
53 * back to the default context core when resuming hypervisor execution.
54 *
55 * @param pVM The VM handle.
56 * @param pCtxCore Pointer to the alternative context core or NULL
57 * to go back to the default context core.
58 */
59VMMDECL(void) CPUMHyperSetCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
60{
61 LogFlow(("CPUMHyperSetCtxCore: %p -> %p\n", pVM->cpum.s.CTX_SUFF(pHyperCore), pCtxCore));
62 if (!pCtxCore)
63 {
64 pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
65 pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
66 pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
67 pVM->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))VM_RC_ADDR(pVM, pCtxCore);
68 }
69 else
70 {
71 pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
72 pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
73 pVM->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
74 }
75}
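/*
 * Illustrative sketch, not part of the original file: a raw-mode trap
 * handler would typically point CPUM at the trap frame on entry and pass
 * NULL on resume to restore the default core. exampleTrapDispatch is a
 * hypothetical caller; the accessors are the real ones from this file.
 */
#if 0 /* illustrative only */
static void exampleTrapDispatch(PVM pVM, PCPUMCTXCORE pTrapFrame)
{
    CPUMHyperSetCtxCore(pVM, pTrapFrame);   /* hyper core = trap frame */
    uint32_t uEip = CPUMGetHyperEIP(pVM);   /* now reads the trap frame */
    NOREF(uEip);
    CPUMHyperSetCtxCore(pVM, NULL);         /* back to &pVM->cpum.s.Hyper */
}
#endif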
76
77
78/**
79 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
80 * This is only for reading in order to save a few calls.
81 *
82 * @param pVM Handle to the virtual machine.
83 */
84VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVM pVM)
85{
86 return pVM->cpum.s.CTX_SUFF(pHyperCore);
87}
88
89
90/**
91 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
92 *
93 * @returns VBox status code.
94 * @param pVM Handle to the virtual machine.
95 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
96 *
97 * @deprecated This will *not* (and never has) give the right picture of the
98 * hypervisor register state. With CPUMHyperSetCtxCore() this is
99 * getting much worse. So, use the individual functions for getting
100 * and esp. setting the hypervisor registers.
101 */
102VMMDECL(int) CPUMQueryHyperCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
103{
104 *ppCtx = &pVM->cpum.s.Hyper;
105 return VINF_SUCCESS;
106}
107
108
109VMMDECL(void) CPUMSetHyperGDTR(PVM pVM, uint32_t addr, uint16_t limit)
110{
111 pVM->cpum.s.Hyper.gdtr.cbGdt = limit;
112 pVM->cpum.s.Hyper.gdtr.pGdt = addr;
113 pVM->cpum.s.Hyper.gdtrPadding = 0;
114}
115
116
117VMMDECL(void) CPUMSetHyperIDTR(PVM pVM, uint32_t addr, uint16_t limit)
118{
119 pVM->cpum.s.Hyper.idtr.cbIdt = limit;
120 pVM->cpum.s.Hyper.idtr.pIdt = addr;
121 pVM->cpum.s.Hyper.idtrPadding = 0;
122}
123
124
125VMMDECL(void) CPUMSetHyperCR3(PVM pVM, uint32_t cr3)
126{
127 pVM->cpum.s.Hyper.cr3 = cr3;
128
129#ifdef IN_RC
130 /* Update the current CR3. */
131 ASMSetCR3(cr3);
132#endif
133}
134
135VMMDECL(uint32_t) CPUMGetHyperCR3(PVM pVM)
136{
137 return pVM->cpum.s.Hyper.cr3;
138}
139
140
141VMMDECL(void) CPUMSetHyperCS(PVM pVM, RTSEL SelCS)
142{
143 pVM->cpum.s.CTX_SUFF(pHyperCore)->cs = SelCS;
144}
145
146
147VMMDECL(void) CPUMSetHyperDS(PVM pVM, RTSEL SelDS)
148{
149 pVM->cpum.s.CTX_SUFF(pHyperCore)->ds = SelDS;
150}
151
152
153VMMDECL(void) CPUMSetHyperES(PVM pVM, RTSEL SelES)
154{
155 pVM->cpum.s.CTX_SUFF(pHyperCore)->es = SelES;
156}
157
158
159VMMDECL(void) CPUMSetHyperFS(PVM pVM, RTSEL SelFS)
160{
161 pVM->cpum.s.CTX_SUFF(pHyperCore)->fs = SelFS;
162}
163
164
165VMMDECL(void) CPUMSetHyperGS(PVM pVM, RTSEL SelGS)
166{
167 pVM->cpum.s.CTX_SUFF(pHyperCore)->gs = SelGS;
168}
169
170
171VMMDECL(void) CPUMSetHyperSS(PVM pVM, RTSEL SelSS)
172{
173 pVM->cpum.s.CTX_SUFF(pHyperCore)->ss = SelSS;
174}
175
176
177VMMDECL(void) CPUMSetHyperESP(PVM pVM, uint32_t u32ESP)
178{
179 pVM->cpum.s.CTX_SUFF(pHyperCore)->esp = u32ESP;
180}
181
182
183VMMDECL(int) CPUMSetHyperEFlags(PVM pVM, uint32_t Efl)
184{
185 pVM->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32 = Efl;
186 return VINF_SUCCESS;
187}
188
189
190VMMDECL(void) CPUMSetHyperEIP(PVM pVM, uint32_t u32EIP)
191{
192 pVM->cpum.s.CTX_SUFF(pHyperCore)->eip = u32EIP;
193}
194
195
196VMMDECL(void) CPUMSetHyperTR(PVM pVM, RTSEL SelTR)
197{
198 pVM->cpum.s.Hyper.tr = SelTR;
199}
200
201
202VMMDECL(void) CPUMSetHyperLDTR(PVM pVM, RTSEL SelLDTR)
203{
204 pVM->cpum.s.Hyper.ldtr = SelLDTR;
205}
206
207
208VMMDECL(void) CPUMSetHyperDR0(PVM pVM, RTGCUINTREG uDr0)
209{
210 pVM->cpum.s.Hyper.dr[0] = uDr0;
211 /** @todo in GC we must load it! */
212}
213
214
215VMMDECL(void) CPUMSetHyperDR1(PVM pVM, RTGCUINTREG uDr1)
216{
217 pVM->cpum.s.Hyper.dr[1] = uDr1;
218 /** @todo in GC we must load it! */
219}
220
221
222VMMDECL(void) CPUMSetHyperDR2(PVM pVM, RTGCUINTREG uDr2)
223{
224 pVM->cpum.s.Hyper.dr[2] = uDr2;
225 /** @todo in GC we must load it! */
226}
227
228
229VMMDECL(void) CPUMSetHyperDR3(PVM pVM, RTGCUINTREG uDr3)
230{
231 pVM->cpum.s.Hyper.dr[3] = uDr3;
232 /** @todo in GC we must load it! */
233}
234
235
236VMMDECL(void) CPUMSetHyperDR6(PVM pVM, RTGCUINTREG uDr6)
237{
238 pVM->cpum.s.Hyper.dr[6] = uDr6;
239 /** @todo in GC we must load it! */
240}
241
242
243VMMDECL(void) CPUMSetHyperDR7(PVM pVM, RTGCUINTREG uDr7)
244{
245 pVM->cpum.s.Hyper.dr[7] = uDr7;
246 /** @todo in GC we must load it! */
247}
248
249
250VMMDECL(RTSEL) CPUMGetHyperCS(PVM pVM)
251{
252 return pVM->cpum.s.CTX_SUFF(pHyperCore)->cs;
253}
254
255
256VMMDECL(RTSEL) CPUMGetHyperDS(PVM pVM)
257{
258 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ds;
259}
260
261
262VMMDECL(RTSEL) CPUMGetHyperES(PVM pVM)
263{
264 return pVM->cpum.s.CTX_SUFF(pHyperCore)->es;
265}
266
267
268VMMDECL(RTSEL) CPUMGetHyperFS(PVM pVM)
269{
270 return pVM->cpum.s.CTX_SUFF(pHyperCore)->fs;
271}
272
273
274VMMDECL(RTSEL) CPUMGetHyperGS(PVM pVM)
275{
276 return pVM->cpum.s.CTX_SUFF(pHyperCore)->gs;
277}
278
279
280VMMDECL(RTSEL) CPUMGetHyperSS(PVM pVM)
281{
282 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ss;
283}
284
285
286VMMDECL(uint32_t) CPUMGetHyperEAX(PVM pVM)
287{
288 return pVM->cpum.s.CTX_SUFF(pHyperCore)->eax;
289}
290
291
292VMMDECL(uint32_t) CPUMGetHyperEBX(PVM pVM)
293{
294 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ebx;
295}
296
297
298VMMDECL(uint32_t) CPUMGetHyperECX(PVM pVM)
299{
300 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ecx;
301}
302
303
304VMMDECL(uint32_t) CPUMGetHyperEDX(PVM pVM)
305{
306 return pVM->cpum.s.CTX_SUFF(pHyperCore)->edx;
307}
308
309
310VMMDECL(uint32_t) CPUMGetHyperESI(PVM pVM)
311{
312 return pVM->cpum.s.CTX_SUFF(pHyperCore)->esi;
313}
314
315
316VMMDECL(uint32_t) CPUMGetHyperEDI(PVM pVM)
317{
318 return pVM->cpum.s.CTX_SUFF(pHyperCore)->edi;
319}
320
321
322VMMDECL(uint32_t) CPUMGetHyperEBP(PVM pVM)
323{
324 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ebp;
325}
326
327
328VMMDECL(uint32_t) CPUMGetHyperESP(PVM pVM)
329{
330 return pVM->cpum.s.CTX_SUFF(pHyperCore)->esp;
331}
332
333
334VMMDECL(uint32_t) CPUMGetHyperEFlags(PVM pVM)
335{
336 return pVM->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32;
337}
338
339
340VMMDECL(uint32_t) CPUMGetHyperEIP(PVM pVM)
341{
342 return pVM->cpum.s.CTX_SUFF(pHyperCore)->eip;
343}
344
345
346VMMDECL(uint64_t) CPUMGetHyperRIP(PVM pVM)
347{
348 return pVM->cpum.s.CTX_SUFF(pHyperCore)->rip;
349}
350
351
352VMMDECL(uint32_t) CPUMGetHyperIDTR(PVM pVM, uint16_t *pcbLimit)
353{
354 if (pcbLimit)
355 *pcbLimit = pVM->cpum.s.Hyper.idtr.cbIdt;
356 return pVM->cpum.s.Hyper.idtr.pIdt;
357}
358
359
360VMMDECL(uint32_t) CPUMGetHyperGDTR(PVM pVM, uint16_t *pcbLimit)
361{
362 if (pcbLimit)
363 *pcbLimit = pVM->cpum.s.Hyper.gdtr.cbGdt;
364 return pVM->cpum.s.Hyper.gdtr.pGdt;
365}
366
367
368VMMDECL(RTSEL) CPUMGetHyperLDTR(PVM pVM)
369{
370 return pVM->cpum.s.Hyper.ldtr;
371}
372
373
374VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVM pVM)
375{
376 return pVM->cpum.s.Hyper.dr[0];
377}
378
379
380VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVM pVM)
381{
382 return pVM->cpum.s.Hyper.dr[1];
383}
384
385
386VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVM pVM)
387{
388 return pVM->cpum.s.Hyper.dr[2];
389}
390
391
392VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVM pVM)
393{
394 return pVM->cpum.s.Hyper.dr[3];
395}
396
397
398VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVM pVM)
399{
400 return pVM->cpum.s.Hyper.dr[6];
401}
402
403
404VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVM pVM)
405{
406 return pVM->cpum.s.Hyper.dr[7];
407}
408
409
410/**
411 * Gets the pointer to the internal CPUMCTXCORE structure.
412 * This is only for reading in order to save a few calls.
413 *
414 * @param pVM Handle to the virtual machine.
415 */
416VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVM pVM)
417{
418 VM_ASSERT_EMT(pVM);
419 return CPUMCTX2CORE(&pVM->aCpus[VMMGetCpuId(pVM)].cpum.s.Guest);
420}
421
422/**
423 * Gets the pointer to the internal CPUMCTXCORE structure.
424 * This is only for reading in order to save a few calls.
425 *
426 * @param pVM Handle to the virtual machine.
427 */
428VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCoreEx(PVM pVM, PVMCPU pVCpu)
429{
430 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
431}
432
433
434/**
435 * Sets the guest context core registers.
436 *
437 * @param pVM Handle to the virtual machine.
438 * @param pCtxCore The new context core values.
439 */
440VMMDECL(void) CPUMSetGuestCtxCore(PVM pVM, PCCPUMCTXCORE pCtxCore)
441{
442 /** @todo #1410 requires selectors to be checked. (huh? 1410?) */
443
444 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVM->aCpus[VMMGetCpuId(pVM)].cpum.s.Guest);
445 *pCtxCoreDst = *pCtxCore;
446
447 /* Mask away invalid parts of the cpu context. */
448 if (!CPUMIsGuestInLongMode(pVM))
449 {
450 uint64_t u64Mask = UINT64_C(0xffffffff);
451
452 pCtxCoreDst->rip &= u64Mask;
453 pCtxCoreDst->rax &= u64Mask;
454 pCtxCoreDst->rbx &= u64Mask;
455 pCtxCoreDst->rcx &= u64Mask;
456 pCtxCoreDst->rdx &= u64Mask;
457 pCtxCoreDst->rsi &= u64Mask;
458 pCtxCoreDst->rdi &= u64Mask;
459 pCtxCoreDst->rbp &= u64Mask;
460 pCtxCoreDst->rsp &= u64Mask;
461 pCtxCoreDst->rflags.u &= u64Mask;
462
463 pCtxCoreDst->r8 = 0;
464 pCtxCoreDst->r9 = 0;
465 pCtxCoreDst->r10 = 0;
466 pCtxCoreDst->r11 = 0;
467 pCtxCoreDst->r12 = 0;
468 pCtxCoreDst->r13 = 0;
469 pCtxCoreDst->r14 = 0;
470 pCtxCoreDst->r15 = 0;
471 }
472}
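/*
 * Worked example (illustrative, not part of the original file) of the
 * masking above: outside long mode, writing rax = UINT64_C(0x1122334455667788)
 * through this function leaves eax = 0x55667788 and forces r8..r15 to zero,
 * so a 32-bit guest never sees stale 64-bit register halves.
 */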
473
474
475/**
476 * Queries the pointer to the internal CPUMCTX structure
477 *
478 * @returns The CPUMCTX pointer.
479 * @param pVM Handle to the virtual machine.
480 */
481VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVM pVM)
482{
483 return &pVM->aCpus[VMMGetCpuId(pVM)].cpum.s.Guest;
484}
485
486static PCPUMCPU cpumGetCpumCpu(PVM pVM)
487{
488 RTCPUID idCpu = VMMGetCpuId(pVM);
489
490 return &pVM->aCpus[idCpu].cpum.s;
491}
492
493VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtrEx(PVM pVM, PVMCPU pVCpu)
494{
495 Assert(pVCpu->idCpu < pVM->cCPUs);
496 return &pVCpu->cpum.s.Guest;
497}
498
499VMMDECL(int) CPUMSetGuestGDTR(PVM pVM, uint32_t addr, uint16_t limit)
500{
501 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
502
503 pCpumCpu->Guest.gdtr.cbGdt = limit;
504 pCpumCpu->Guest.gdtr.pGdt = addr;
505 pCpumCpu->fChanged |= CPUM_CHANGED_GDTR;
506 return VINF_SUCCESS;
507}
508
509VMMDECL(int) CPUMSetGuestIDTR(PVM pVM, uint32_t addr, uint16_t limit)
510{
511 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
512
513 pCpumCpu->Guest.idtr.cbIdt = limit;
514 pCpumCpu->Guest.idtr.pIdt = addr;
515 pCpumCpu->fChanged |= CPUM_CHANGED_IDTR;
516 return VINF_SUCCESS;
517}
518
519VMMDECL(int) CPUMSetGuestTR(PVM pVM, uint16_t tr)
520{
521 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
522 AssertMsgFailed(("Need to load the hidden bits too!\n"));
523
524 pCpumCpu->Guest.tr = tr;
525 pCpumCpu->fChanged |= CPUM_CHANGED_TR;
526 return VINF_SUCCESS;
527}
528
529VMMDECL(int) CPUMSetGuestLDTR(PVM pVM, uint16_t ldtr)
530{
531 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
532
533 pCpumCpu->Guest.ldtr = ldtr;
534 pCpumCpu->fChanged |= CPUM_CHANGED_LDTR;
535 return VINF_SUCCESS;
536}
537
538
539/**
540 * Set the guest CR0.
541 *
542 * When called in GC, the hyper CR0 may be updated if that is
543 * required. The caller only has to take special action if AM,
544 * WP, PG or PE changes.
545 *
546 * @returns VINF_SUCCESS (consider it void).
547 * @param pVM Pointer to the shared VM structure.
548 * @param cr0 The new CR0 value.
549 */
550VMMDECL(int) CPUMSetGuestCR0(PVM pVM, uint64_t cr0)
551{
552 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
553
554#ifdef IN_RC
555 /*
556 * Check if we need to change hypervisor CR0 because
557 * of math stuff.
558 */
559 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
560 != (pCpumCpu->Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
561 {
562 if (!(pCpumCpu->fUseFlags & CPUM_USED_FPU))
563 {
564 /*
565 * We haven't saved the host FPU state yet, so TS and MP are both set
566 * and EM should be reflecting the guest EM (it always does this).
567 */
568 if ((cr0 & X86_CR0_EM) != (pCpumCpu->Guest.cr0 & X86_CR0_EM))
569 {
570 uint32_t HyperCR0 = ASMGetCR0();
571 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
572 AssertMsg((HyperCR0 & X86_CR0_EM) == (pCpumCpu->Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
573 HyperCR0 &= ~X86_CR0_EM;
574 HyperCR0 |= cr0 & X86_CR0_EM;
575 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
576 ASMSetCR0(HyperCR0);
577 }
578# ifdef VBOX_STRICT
579 else
580 {
581 uint32_t HyperCR0 = ASMGetCR0();
582 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
583 AssertMsg((HyperCR0 & X86_CR0_EM) == (pCpumCpu->Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
584 }
585# endif
586 }
587 else
588 {
589 /*
590 * Already saved the state, so we're just mirroring
591 * the guest flags.
592 */
593 uint32_t HyperCR0 = ASMGetCR0();
594 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
595 == (pCpumCpu->Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
596 ("%#x %#x\n", HyperCR0, pCpumCpu->Guest.cr0));
597 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
598 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
599 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
600 ASMSetCR0(HyperCR0);
601 }
602 }
603#endif /* IN_RC */
604
605 /*
606 * Check for changes causing TLB flushes (for REM).
607 * The caller is responsible for calling PGM when appropriate.
608 */
609 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
610 != (pCpumCpu->Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
611 pCpumCpu->fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
612 pCpumCpu->fChanged |= CPUM_CHANGED_CR0;
613
614 pCpumCpu->Guest.cr0 = cr0 | X86_CR0_ET;
615 return VINF_SUCCESS;
616}
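/*
 * Illustrative sketch, not part of the original file: only PG, WP and PE
 * participate in the TLB-flush comparison above, so toggling the
 * FPU-related TS bit marks CPUM_CHANGED_CR0 but not
 * CPUM_CHANGED_GLOBAL_TLB_FLUSH.
 */
#if 0 /* illustrative only */
    uint64_t cr0 = CPUMGetGuestCR0(pVM);
    CPUMSetGuestCR0(pVM, cr0 ^ X86_CR0_TS); /* no TLB flush flag */
    CPUMSetGuestCR0(pVM, cr0 ^ X86_CR0_PG); /* sets the TLB flush flag */
#endif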
617
618
619VMMDECL(int) CPUMSetGuestCR2(PVM pVM, uint64_t cr2)
620{
621 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
622
623 pCpumCpu->Guest.cr2 = cr2;
624 return VINF_SUCCESS;
625}
626
627
628VMMDECL(int) CPUMSetGuestCR3(PVM pVM, uint64_t cr3)
629{
630 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
631
632 pCpumCpu->Guest.cr3 = cr3;
633 pCpumCpu->fChanged |= CPUM_CHANGED_CR3;
634 return VINF_SUCCESS;
635}
636
637
638VMMDECL(int) CPUMSetGuestCR4(PVM pVM, uint64_t cr4)
639{
640 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
641
642 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
643 != (pCpumCpu->Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
644 pCpumCpu->fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
645 pCpumCpu->fChanged |= CPUM_CHANGED_CR4;
646 if (!CPUMSupportsFXSR(pVM))
647 cr4 &= ~X86_CR4_OSFSXR;
648 pCpumCpu->Guest.cr4 = cr4;
649 return VINF_SUCCESS;
650}
651
652
653VMMDECL(int) CPUMSetGuestEFlags(PVM pVM, uint32_t eflags)
654{
655 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
656
657 pCpumCpu->Guest.eflags.u32 = eflags;
658 return VINF_SUCCESS;
659}
660
661
662VMMDECL(int) CPUMSetGuestEIP(PVM pVM, uint32_t eip)
663{
664 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
665
666 pCpumCpu->Guest.eip = eip;
667 return VINF_SUCCESS;
668}
669
670
671VMMDECL(int) CPUMSetGuestEAX(PVM pVM, uint32_t eax)
672{
673 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
674
675 pCpumCpu->Guest.eax = eax;
676 return VINF_SUCCESS;
677}
678
679
680VMMDECL(int) CPUMSetGuestEBX(PVM pVM, uint32_t ebx)
681{
682 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
683
684 pCpumCpu->Guest.ebx = ebx;
685 return VINF_SUCCESS;
686}
687
688
689VMMDECL(int) CPUMSetGuestECX(PVM pVM, uint32_t ecx)
690{
691 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
692
693 pCpumCpu->Guest.ecx = ecx;
694 return VINF_SUCCESS;
695}
696
697
698VMMDECL(int) CPUMSetGuestEDX(PVM pVM, uint32_t edx)
699{
700 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
701
702 pCpumCpu->Guest.edx = edx;
703 return VINF_SUCCESS;
704}
705
706
707VMMDECL(int) CPUMSetGuestESP(PVM pVM, uint32_t esp)
708{
709 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
710
711 pCpumCpu->Guest.esp = esp;
712 return VINF_SUCCESS;
713}
714
715
716VMMDECL(int) CPUMSetGuestEBP(PVM pVM, uint32_t ebp)
717{
718 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
719
720 pCpumCpu->Guest.ebp = ebp;
721 return VINF_SUCCESS;
722}
723
724
725VMMDECL(int) CPUMSetGuestESI(PVM pVM, uint32_t esi)
726{
727 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
728
729 pCpumCpu->Guest.esi = esi;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestEDI(PVM pVM, uint32_t edi)
735{
736 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
737
738 pCpumCpu->Guest.edi = edi;
739 return VINF_SUCCESS;
740}
741
742
743VMMDECL(int) CPUMSetGuestSS(PVM pVM, uint16_t ss)
744{
745 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
746
747 pCpumCpu->Guest.ss = ss;
748 return VINF_SUCCESS;
749}
750
751
752VMMDECL(int) CPUMSetGuestCS(PVM pVM, uint16_t cs)
753{
754 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
755
756 pCpumCpu->Guest.cs = cs;
757 return VINF_SUCCESS;
758}
759
760
761VMMDECL(int) CPUMSetGuestDS(PVM pVM, uint16_t ds)
762{
763 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
764
765 pCpumCpu->Guest.ds = ds;
766 return VINF_SUCCESS;
767}
768
769
770VMMDECL(int) CPUMSetGuestES(PVM pVM, uint16_t es)
771{
772 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
773
774 pCpumCpu->Guest.es = es;
775 return VINF_SUCCESS;
776}
777
778
779VMMDECL(int) CPUMSetGuestFS(PVM pVM, uint16_t fs)
780{
781 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
782
783 pCpumCpu->Guest.fs = fs;
784 return VINF_SUCCESS;
785}
786
787
788VMMDECL(int) CPUMSetGuestGS(PVM pVM, uint16_t gs)
789{
790 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
791
792 pCpumCpu->Guest.gs = gs;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(void) CPUMSetGuestEFER(PVM pVM, uint64_t val)
798{
799 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
800
801 pCpumCpu->Guest.msrEFER = val;
802}
803
804
805VMMDECL(uint64_t) CPUMGetGuestMsr(PVM pVM, unsigned idMsr)
806{
807 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
808 uint64_t u64 = 0;
809
810 switch (idMsr)
811 {
812 case MSR_IA32_CR_PAT:
813 u64 = pCpumCpu->Guest.msrPAT;
814 break;
815
816 case MSR_IA32_SYSENTER_CS:
817 u64 = pCpumCpu->Guest.SysEnter.cs;
818 break;
819
820 case MSR_IA32_SYSENTER_EIP:
821 u64 = pCpumCpu->Guest.SysEnter.eip;
822 break;
823
824 case MSR_IA32_SYSENTER_ESP:
825 u64 = pCpumCpu->Guest.SysEnter.esp;
826 break;
827
828 case MSR_K6_EFER:
829 u64 = pCpumCpu->Guest.msrEFER;
830 break;
831
832 case MSR_K8_SF_MASK:
833 u64 = pCpumCpu->Guest.msrSFMASK;
834 break;
835
836 case MSR_K6_STAR:
837 u64 = pCpumCpu->Guest.msrSTAR;
838 break;
839
840 case MSR_K8_LSTAR:
841 u64 = pCpumCpu->Guest.msrLSTAR;
842 break;
843
844 case MSR_K8_CSTAR:
845 u64 = pCpumCpu->Guest.msrCSTAR;
846 break;
847
848 case MSR_K8_KERNEL_GS_BASE:
849 u64 = pCpumCpu->Guest.msrKERNELGSBASE;
850 break;
851
852 case MSR_K8_TSC_AUX:
853 u64 = pCpumCpu->GuestMsr.msr.tscAux;
854 break;
855
856 /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
857 default:
858 AssertFailed();
859 break;
860 }
861 return u64;
862}
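/*
 * Illustrative sketch, not part of the original file: reading EFER through
 * the generic getter matches the dedicated accessor; MSRs outside the
 * switch above trip the AssertFailed() default and come back as 0.
 */
#if 0 /* illustrative only */
    uint64_t uEfer = CPUMGetGuestMsr(pVM, MSR_K6_EFER);
    Assert(uEfer == CPUMGetGuestEFER(pVM));
#endif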
863
864VMMDECL(void) CPUMSetGuestMsr(PVM pVM, unsigned idMsr, uint64_t valMsr)
865{
866 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
867
868 /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
869 switch (idMsr)
870 {
871 case MSR_K8_TSC_AUX:
872 pCpumCpu->GuestMsr.msr.tscAux = valMsr;
873 break;
874
875 default:
876 AssertFailed();
877 break;
878 }
879}
880
881VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVM pVM, uint16_t *pcbLimit)
882{
883 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
884
885 if (pcbLimit)
886 *pcbLimit = pCpumCpu->Guest.idtr.cbIdt;
887 return pCpumCpu->Guest.idtr.pIdt;
888}
889
890
891VMMDECL(RTSEL) CPUMGetGuestTR(PVM pVM, PCPUMSELREGHID pHidden)
892{
893 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
894 if (pHidden)
895 *pHidden = pCpumCpu->Guest.trHid;
896 return pCpumCpu->Guest.tr;
897}
898
899
900VMMDECL(RTSEL) CPUMGetGuestCS(PVM pVM)
901{
902 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
903
904 return pCpumCpu->Guest.cs;
905}
906
907
908VMMDECL(RTSEL) CPUMGetGuestDS(PVM pVM)
909{
910 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
911
912 return pCpumCpu->Guest.ds;
913}
914
915
916VMMDECL(RTSEL) CPUMGetGuestES(PVM pVM)
917{
918 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
919
920 return pCpumCpu->Guest.es;
921}
922
923
924VMMDECL(RTSEL) CPUMGetGuestFS(PVM pVM)
925{
926 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
927
928 return pCpumCpu->Guest.fs;
929}
930
931
932VMMDECL(RTSEL) CPUMGetGuestGS(PVM pVM)
933{
934 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
935
936 return pCpumCpu->Guest.gs;
937}
938
939
940VMMDECL(RTSEL) CPUMGetGuestSS(PVM pVM)
941{
942 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
943
944 return pCpumCpu->Guest.ss;
945}
946
947
948VMMDECL(RTSEL) CPUMGetGuestLDTR(PVM pVM)
949{
950 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
951
952 return pCpumCpu->Guest.ldtr;
953}
954
955
956VMMDECL(uint64_t) CPUMGetGuestCR0(PVM pVM)
957{
958 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
959
960 return pCpumCpu->Guest.cr0;
961}
962
963
964VMMDECL(uint64_t) CPUMGetGuestCR2(PVM pVM)
965{
966 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
967
968 return pCpumCpu->Guest.cr2;
969}
970
971
972VMMDECL(uint64_t) CPUMGetGuestCR3(PVM pVM)
973{
974 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
975
976 return pCpumCpu->Guest.cr3;
977}
978
979
980VMMDECL(uint64_t) CPUMGetGuestCR4(PVM pVM)
981{
982 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
983
984 return pCpumCpu->Guest.cr4;
985}
986
987
988VMMDECL(void) CPUMGetGuestGDTR(PVM pVM, PVBOXGDTR pGDTR)
989{
990 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
991
992 *pGDTR = pCpumCpu->Guest.gdtr;
993}
994
995
996VMMDECL(uint32_t) CPUMGetGuestEIP(PVM pVM)
997{
998 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
999
1000 return pCpumCpu->Guest.eip;
1001}
1002
1003
1004VMMDECL(uint64_t) CPUMGetGuestRIP(PVM pVM)
1005{
1006 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1007
1008 return pCpumCpu->Guest.rip;
1009}
1010
1011
1012VMMDECL(uint32_t) CPUMGetGuestEAX(PVM pVM)
1013{
1014 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1015
1016 return pCpumCpu->Guest.eax;
1017}
1018
1019
1020VMMDECL(uint32_t) CPUMGetGuestEBX(PVM pVM)
1021{
1022 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1023
1024 return pCpumCpu->Guest.ebx;
1025}
1026
1027
1028VMMDECL(uint32_t) CPUMGetGuestECX(PVM pVM)
1029{
1030 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1031
1032 return pCpumCpu->Guest.ecx;
1033}
1034
1035
1036VMMDECL(uint32_t) CPUMGetGuestEDX(PVM pVM)
1037{
1038 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1039
1040 return pCpumCpu->Guest.edx;
1041}
1042
1043
1044VMMDECL(uint32_t) CPUMGetGuestESI(PVM pVM)
1045{
1046 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1047
1048 return pCpumCpu->Guest.esi;
1049}
1050
1051
1052VMMDECL(uint32_t) CPUMGetGuestEDI(PVM pVM)
1053{
1054 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1055
1056 return pCpumCpu->Guest.edi;
1057}
1058
1059
1060VMMDECL(uint32_t) CPUMGetGuestESP(PVM pVM)
1061{
1062 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1063
1064 return pCpumCpu->Guest.esp;
1065}
1066
1067
1068VMMDECL(uint32_t) CPUMGetGuestEBP(PVM pVM)
1069{
1070 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1071
1072 return pCpumCpu->Guest.ebp;
1073}
1074
1075
1076VMMDECL(uint32_t) CPUMGetGuestEFlags(PVM pVM)
1077{
1078 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1079
1080 return pCpumCpu->Guest.eflags.u32;
1081}
1082
1083
1084/** @todo crx should be an array. */
1085VMMDECL(int) CPUMGetGuestCRx(PVM pVM, unsigned iReg, uint64_t *pValue)
1086{
1087 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1088
1089 switch (iReg)
1090 {
1091 case USE_REG_CR0:
1092 *pValue = pCpumCpu->Guest.cr0;
1093 break;
1094 case USE_REG_CR2:
1095 *pValue = pCpumCpu->Guest.cr2;
1096 break;
1097 case USE_REG_CR3:
1098 *pValue = pCpumCpu->Guest.cr3;
1099 break;
1100 case USE_REG_CR4:
1101 *pValue = pCpumCpu->Guest.cr4;
1102 break;
1103 default:
1104 return VERR_INVALID_PARAMETER;
1105 }
1106 return VINF_SUCCESS;
1107}
1108
1109
1110VMMDECL(uint64_t) CPUMGetGuestDR0(PVM pVM)
1111{
1112 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1113
1114 return pCpumCpu->Guest.dr[0];
1115}
1116
1117
1118VMMDECL(uint64_t) CPUMGetGuestDR1(PVM pVM)
1119{
1120 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1121
1122 return pCpumCpu->Guest.dr[1];
1123}
1124
1125
1126VMMDECL(uint64_t) CPUMGetGuestDR2(PVM pVM)
1127{
1128 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1129
1130 return pCpumCpu->Guest.dr[2];
1131}
1132
1133
1134VMMDECL(uint64_t) CPUMGetGuestDR3(PVM pVM)
1135{
1136 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1137
1138 return pCpumCpu->Guest.dr[3];
1139}
1140
1141
1142VMMDECL(uint64_t) CPUMGetGuestDR6(PVM pVM)
1143{
1144 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1145
1146 return pCpumCpu->Guest.dr[6];
1147}
1148
1149
1150VMMDECL(uint64_t) CPUMGetGuestDR7(PVM pVM)
1151{
1152 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1153
1154 return pCpumCpu->Guest.dr[7];
1155}
1156
1157
1158VMMDECL(int) CPUMGetGuestDRx(PVM pVM, uint32_t iReg, uint64_t *pValue)
1159{
1160 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1161
1162 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1163 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1164 if (iReg == 4 || iReg == 5)
1165 iReg += 2;
1166 *pValue = pCpumCpu->Guest.dr[iReg];
1167 return VINF_SUCCESS;
1168}
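/*
 * Illustrative sketch, not part of the original file: because of the alias
 * handling above, asking for DR5 returns the DR7 value.
 */
#if 0 /* illustrative only */
    uint64_t uValue;
    int rc = CPUMGetGuestDRx(pVM, 5, &uValue);  /* DR5 aliases DR7 */
    Assert(RT_SUCCESS(rc) && uValue == CPUMGetGuestDR7(pVM));
#endif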
1169
1170
1171VMMDECL(uint64_t) CPUMGetGuestEFER(PVM pVM)
1172{
1173 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1174
1175 return pCpumCpu->Guest.msrEFER;
1176}
1177
1178
1179/**
1180 * Gets a CpuId leaf.
1181 *
1182 * @param pVM The VM handle.
1183 * @param iLeaf The CPUID leaf to get.
1184 * @param pEax Where to store the EAX value.
1185 * @param pEbx Where to store the EBX value.
1186 * @param pEcx Where to store the ECX value.
1187 * @param pEdx Where to store the EDX value.
1188 */
1189VMMDECL(void) CPUMGetGuestCpuId(PVM pVM, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1190{
1191 PCCPUMCPUID pCpuId;
1192 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1193 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1194 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1195 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1196 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1197 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1198 else
1199 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1200
1201 *pEax = pCpuId->eax;
1202 *pEbx = pCpuId->ebx;
1203 *pEcx = pCpuId->ecx;
1204 *pEdx = pCpuId->edx;
1205 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1206}
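/*
 * Illustrative sketch, not part of the original file: fetching standard
 * leaf 1 and testing a feature bit the same way guest CPUID emulation
 * would.
 */
#if 0 /* illustrative only */
    uint32_t uEax, uEbx, uEcx, uEdx;
    CPUMGetGuestCpuId(pVM, 1, &uEax, &uEbx, &uEcx, &uEdx);
    bool fPae = RT_BOOL(uEdx & X86_CPUID_FEATURE_EDX_PAE);
#endif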
1207
1208
1209/**
1210 * Gets a pointer to the array of standard CPUID leafs.
1211 *
1212 * CPUMGetGuestCpuIdStdMax() gives the size of the array.
1213 *
1214 * @returns Pointer to the standard CPUID leafs (read-only).
1215 * @param pVM The VM handle.
1216 * @remark Intended for PATM.
1217 */
1218VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdStdRCPtr(PVM pVM)
1219{
1220 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
1221}
1222
1223
1224/**
1225 * Gets a pointer to the array of extended CPUID leafs.
1226 *
1227 * CPUMGetGuestCpuIdExtMax() gives the size of the array.
1228 *
1229 * @returns Pointer to the extended CPUID leafs (read-only).
1230 * @param pVM The VM handle.
1231 * @remark Intended for PATM.
1232 */
1233VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdExtRCPtr(PVM pVM)
1234{
1235 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
1236}
1237
1238
1239/**
1240 * Gets a pointer to the array of centaur CPUID leafs.
1241 *
1242 * CPUMGetGuestCpuIdCentaurMax() gives the size of the array.
1243 *
1244 * @returns Pointer to the centaur CPUID leafs (read-only).
1245 * @param pVM The VM handle.
1246 * @remark Intended for PATM.
1247 */
1248VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdCentaurRCPtr(PVM pVM)
1249{
1250 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
1251}
1252
1253
1254/**
1255 * Gets a pointer to the default CPUID leaf.
1256 *
1257 * @returns Pointer to the default CPUID leaf (read-only).
1258 * @param pVM The VM handle.
1259 * @remark Intended for PATM.
1260 */
1261VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdDefRCPtr(PVM pVM)
1262{
1263 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
1264}
1265
1266
1267/**
1268 * Gets the number of standard CPUID leafs.
1269 *
1270 * @returns Number of leafs.
1271 * @param pVM The VM handle.
1272 * @remark Intended for PATM.
1273 */
1274VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1275{
1276 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1277}
1278
1279
1280/**
1283 * Gets the number of extended CPUID leafs.
1282 *
1283 * @returns Number of leafs.
1284 * @param pVM The VM handle.
1285 * @remark Intended for PATM.
1286 */
1287VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1288{
1289 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1290}
1291
1292
1293/**
1296 * Gets the number of centaur CPUID leafs.
1295 *
1296 * @returns Number of leafs.
1297 * @param pVM The VM handle.
1298 * @remark Intended for PATM.
1299 */
1300VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1301{
1302 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1303}
1304
1305
1306/**
1307 * Sets a CPUID feature bit.
1308 *
1309 * @param pVM The VM Handle.
1310 * @param enmFeature The feature to set.
1311 */
1312VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1313{
1314 switch (enmFeature)
1315 {
1316 /*
1317 * Set the APIC bit in both feature masks.
1318 */
1319 case CPUMCPUIDFEATURE_APIC:
1320 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1321 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1322 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1323 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1324 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1325 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1326 break;
1327
1328 /*
1329 * Set the x2APIC bit in the standard feature mask.
1330 */
1331 case CPUMCPUIDFEATURE_X2APIC:
1332 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1333 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1334 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1335 break;
1336
1337 /*
1338 * Set the sysenter/sysexit bit in the standard feature mask.
1339 * Assumes the caller knows what it's doing! (host must support these)
1340 */
1341 case CPUMCPUIDFEATURE_SEP:
1342 {
1343 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1344 {
1345 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1346 return;
1347 }
1348
1349 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1350 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1351 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1352 break;
1353 }
1354
1355 /*
1356 * Set the syscall/sysret bit in the extended feature mask.
1357 * Assumes the caller knows what it's doing! (host must support these)
1358 */
1359 case CPUMCPUIDFEATURE_SYSCALL:
1360 {
1361 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1362 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1363 {
1364#if HC_ARCH_BITS == 32
1365 /* X86_CPUID_AMD_FEATURE_EDX_SEP is apparently not reported in 32-bit mode,
1366 * even when the CPU is capable of it in 64-bit mode.
1367 */
1368 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1369 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
1370 || !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1371#endif
1372 {
1373 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1374 return;
1375 }
1376 }
1377 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1378 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1379 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1380 break;
1381 }
1382
1383 /*
1384 * Set the PAE bit in both feature masks.
1385 * Assumes the caller knows what it's doing! (host must support these)
1386 */
1387 case CPUMCPUIDFEATURE_PAE:
1388 {
1389 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1390 {
1391 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1392 return;
1393 }
1394
1395 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1396 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1397 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1398 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1399 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1400 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1401 break;
1402 }
1403
1404 /*
1405 * Set the LONG MODE bit in the extended feature mask.
1406 * Assumes the caller knows what it's doing! (host must support these)
1407 */
1408 case CPUMCPUIDFEATURE_LONG_MODE:
1409 {
1410 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1411 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1412 {
1413 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1414 return;
1415 }
1416
1417 /* Valid for both Intel and AMD. */
1418 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1419 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1420 break;
1421 }
1422
1423 /*
1424 * Set the NXE bit in the extended feature mask.
1425 * Assumes the caller knows what it's doing! (host must support these)
1426 */
1427 case CPUMCPUIDFEATURE_NXE:
1428 {
1429 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1430 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1431 {
1432 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1433 return;
1434 }
1435
1436 /* Valid for both Intel and AMD. */
1437 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1438 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1439 break;
1440 }
1441
1442 case CPUMCPUIDFEATURE_LAHF:
1443 {
1444 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1445 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1446 {
1447 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1448 return;
1449 }
1450
1451 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1452 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1453 break;
1454 }
1455
1456 case CPUMCPUIDFEATURE_PAT:
1457 {
1458 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1459 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1460 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1461 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1462 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1463 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAT\n"));
1464 break;
1465 }
1466
1467 case CPUMCPUIDFEATURE_RDTSCP:
1468 {
1469 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1470 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP))
1471 {
1472 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1473 return;
1474 }
1475
1476 /* Valid for AMD only (for now). */
1477 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
1478 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1479 break;
1480 }
1481
1482 default:
1483 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1484 break;
1485 }
1486 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1487
1488 pCpumCpu->fChanged |= CPUM_CHANGED_CPUID;
1489}
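/*
 * Illustrative sketch, not part of the original file: the set/query pair
 * in action. The setter silently refuses when the host lacks the feature,
 * so the query below is the authoritative check.
 */
#if 0 /* illustrative only */
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    bool fPae = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
#endif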
1490
1491
1492/**
1493 * Queries a CPUID feature bit.
1494 *
1495 * @returns boolean for feature presence
1496 * @param pVM The VM Handle.
1497 * @param enmFeature The feature to query.
1498 */
1499VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1500{
1501 switch (enmFeature)
1502 {
1503 case CPUMCPUIDFEATURE_PAE:
1504 {
1505 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1506 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1507 break;
1508 }
1509
1510 case CPUMCPUIDFEATURE_RDTSCP:
1511 {
1512 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1513 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
1514 break;
1515 }
1516
1517 case CPUMCPUIDFEATURE_LONG_MODE:
1518 {
1519 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1520 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
1521 break;
1522 }
1523
1524 default:
1525 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1526 break;
1527 }
1528 return false;
1529}
1530
1531
1532/**
1533 * Clears a CPUID feature bit.
1534 *
1535 * @param pVM The VM Handle.
1536 * @param enmFeature The feature to clear.
1537 */
1538VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1539{
1540 switch (enmFeature)
1541 {
1542 /*
1543 * Clear the APIC bit in both feature masks.
1544 */
1545 case CPUMCPUIDFEATURE_APIC:
1546 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1547 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1548 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1549 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1550 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1551 Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n"));
1552 break;
1553
1554 /*
1555 * Clear the x2APIC bit in the standard feature mask.
1556 */
1557 case CPUMCPUIDFEATURE_X2APIC:
1558 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1559 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1560 LogRel(("CPUMClearGuestCpuIdFeature: Disabled x2APIC\n"));
1561 break;
1562
1563 case CPUMCPUIDFEATURE_PAE:
1564 {
1565 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1566 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1567 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1568 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1569 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1570 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1571 break;
1572 }
1573
1574 case CPUMCPUIDFEATURE_PAT:
1575 {
1576 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1577 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1578 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1579 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1580 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1581 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1582 break;
1583 }
1584
1585 case CPUMCPUIDFEATURE_LONG_MODE:
1586 {
1587 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1588 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1589 break;
1590 }
1591
1592 case CPUMCPUIDFEATURE_LAHF:
1593 {
1594 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1595 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1596 break;
1597 }
1598
1599 default:
1600 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1601 break;
1602 }
1603 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1604 pCpumCpu->fChanged |= CPUM_CHANGED_CPUID;
1605}
1606
1607
1608/**
1609 * Gets the CPU vendor
1610 *
1611 * @returns CPU vendor
1612 * @param pVM The VM handle.
1613 */
1614VMMDECL(CPUMCPUVENDOR) CPUMGetCPUVendor(PVM pVM)
1615{
1616 return pVM->cpum.s.enmCPUVendor;
1617}
1618
1619
1620VMMDECL(int) CPUMSetGuestDR0(PVM pVM, uint64_t uDr0)
1621{
1622 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1623
1624 pCpumCpu->Guest.dr[0] = uDr0;
1625 return CPUMRecalcHyperDRx(pVM);
1626}
1627
1628
1629VMMDECL(int) CPUMSetGuestDR1(PVM pVM, uint64_t uDr1)
1630{
1631 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1632
1633 pCpumCpu->Guest.dr[1] = uDr1;
1634 return CPUMRecalcHyperDRx(pVM);
1635}
1636
1637
1638VMMDECL(int) CPUMSetGuestDR2(PVM pVM, uint64_t uDr2)
1639{
1640 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1641
1642 pCpumCpu->Guest.dr[2] = uDr2;
1643 return CPUMRecalcHyperDRx(pVM);
1644}
1645
1646
1647VMMDECL(int) CPUMSetGuestDR3(PVM pVM, uint64_t uDr3)
1648{
1649 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1650
1651 pCpumCpu->Guest.dr[3] = uDr3;
1652 return CPUMRecalcHyperDRx(pVM);
1653}
1654
1655
1656VMMDECL(int) CPUMSetGuestDR6(PVM pVM, uint64_t uDr6)
1657{
1658 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1659
1660 pCpumCpu->Guest.dr[6] = uDr6;
1661 return CPUMRecalcHyperDRx(pVM);
1662}
1663
1664
1665VMMDECL(int) CPUMSetGuestDR7(PVM pVM, uint64_t uDr7)
1666{
1667 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1668
1669 pCpumCpu->Guest.dr[7] = uDr7;
1670 return CPUMRecalcHyperDRx(pVM);
1671}
1672
1673
1674VMMDECL(int) CPUMSetGuestDRx(PVM pVM, uint32_t iReg, uint64_t Value)
1675{
1676 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1677
1678 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1679 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1680 if (iReg == 4 || iReg == 5)
1681 iReg += 2;
1682 pCpumCpu->Guest.dr[iReg] = Value;
1683 return CPUMRecalcHyperDRx(pVM);
1684}
1685
1686
1687/**
1688 * Recalculates the hypervisor DRx register values based on
1689 * current guest registers and DBGF breakpoints.
1690 *
1691 * This is called whenever a guest DRx register is modified and when DBGF
1692 * sets a hardware breakpoint. In guest context this function will reload
1693 * any (hyper) DRx registers which come out with a different value.
1694 *
1695 * @returns VINF_SUCCESS.
1696 * @param pVM The VM handle.
1697 */
1698VMMDECL(int) CPUMRecalcHyperDRx(PVM pVM)
1699{
1700 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1701 /*
1702 * Compare the DR7s first.
1703 *
1704 * We only care about the enabled flags. The GE and LE flags are always
1705 * set and we don't care if the guest doesn't set them. GD is virtualized
1706 * when we dispatch #DB; we never enable it.
1707 */
1708 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1709#ifdef CPUM_VIRTUALIZE_DRX
1710 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVM);
1711#else
1712 const RTGCUINTREG uGstDr7 = 0;
1713#endif
1714 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1715 {
1716 /*
1717 * Ok, something is enabled. Recalc each of the breakpoints.
1719 * Straightforward code, not optimized/minimized in any way.
1719 */
1720 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1721
1722 /* bp 0 */
1723 RTGCUINTREG uNewDr0;
1724 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1725 {
1726 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1727 uNewDr0 = DBGFBpGetDR0(pVM);
1728 }
1729 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1730 {
1731 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1732 uNewDr0 = CPUMGetGuestDR0(pVM);
1733 }
1734 else
1735 uNewDr0 = pVM->cpum.s.Hyper.dr[0];
1736
1737 /* bp 1 */
1738 RTGCUINTREG uNewDr1;
1739 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1740 {
1741 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1742 uNewDr1 = DBGFBpGetDR1(pVM);
1743 }
1744 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1745 {
1746 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1747 uNewDr1 = CPUMGetGuestDR1(pVM);
1748 }
1749 else
1750 uNewDr1 = pVM->cpum.s.Hyper.dr[1];
1751
1752 /* bp 2 */
1753 RTGCUINTREG uNewDr2;
1754 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1755 {
1756 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1757 uNewDr2 = DBGFBpGetDR2(pVM);
1758 }
1759 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1760 {
1761 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1762 uNewDr2 = CPUMGetGuestDR2(pVM);
1763 }
1764 else
1765 uNewDr2 = pVM->cpum.s.Hyper.dr[2];
1766
1767 /* bp 3 */
1768 RTGCUINTREG uNewDr3;
1769 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1770 {
1771 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1772 uNewDr3 = DBGFBpGetDR3(pVM);
1773 }
1774 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1775 {
1776 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1777 uNewDr3 = CPUMGetGuestDR3(pVM);
1778 }
1779 else
1780 uNewDr3 = pVM->cpum.s.Hyper.dr[3];
1781
1782 /*
1783 * Apply the updates.
1784 */
1785#ifdef IN_RC
1786 if (!(pCpumCpu->fUseFlags & CPUM_USE_DEBUG_REGS))
1787 {
1788 /** @todo save host DBx registers. */
1789 }
1790#endif
1791 pCpumCpu->fUseFlags |= CPUM_USE_DEBUG_REGS;
1792 if (uNewDr3 != pVM->cpum.s.Hyper.dr[3])
1793 CPUMSetHyperDR3(pVM, uNewDr3);
1794 if (uNewDr2 != pVM->cpum.s.Hyper.dr[2])
1795 CPUMSetHyperDR2(pVM, uNewDr2);
1796 if (uNewDr1 != pVM->cpum.s.Hyper.dr[1])
1797 CPUMSetHyperDR1(pVM, uNewDr1);
1798 if (uNewDr0 != pVM->cpum.s.Hyper.dr[0])
1799 CPUMSetHyperDR0(pVM, uNewDr0);
1800 if (uNewDr7 != pVM->cpum.s.Hyper.dr[7])
1801 CPUMSetHyperDR7(pVM, uNewDr7);
1802 }
1803 else
1804 {
1805#ifdef IN_RC
1806 if (pCpumCpu->fUseFlags & CPUM_USE_DEBUG_REGS)
1807 {
1808 /** @todo restore host DBx registers. */
1809 }
1810#endif
1811 pCpumCpu->fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1812 }
1813 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1814 pCpumCpu->fUseFlags, pVM->cpum.s.Hyper.dr[0], pVM->cpum.s.Hyper.dr[1],
1815 pVM->cpum.s.Hyper.dr[2], pVM->cpum.s.Hyper.dr[3], pVM->cpum.s.Hyper.dr[6],
1816 pVM->cpum.s.Hyper.dr[7]));
1817
1818 return VINF_SUCCESS;
1819}
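/*
 * Worked example (illustrative, not part of the original file) of the merge
 * above: if DBGF arms breakpoint 0 and the guest arms breakpoint 1, the new
 * hyper DR7 carries both enable fields while DR0 comes from DBGF and DR1
 * from the guest; on a per-breakpoint conflict DBGF wins because its test
 * comes first.
 */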
1820
1821#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
1822
1823/**
1824 * Transforms the guest CPU state to raw-ring mode.
1825 *
1826 * This function will change any of the CS and SS registers with DPL=0 to DPL=1.
1827 *
1828 * @returns VBox status. (recompiler failure)
1829 * @param pVM VM handle.
1830 * @param pCtxCore The context core (for trap usage).
1831 * @see @ref pg_raw
1832 */
1833VMMDECL(int) CPUMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
1834{
1835 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1836
1837 Assert(!pVM->cpum.s.fRawEntered);
1838 if (!pCtxCore)
1839 pCtxCore = CPUMCTX2CORE(&pCpumCpu->Guest);
1840
1841 /*
1842 * Are we in Ring-0?
1843 */
1844 if ( pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
1845 && !pCtxCore->eflags.Bits.u1VM)
1846 {
1847 /*
1848 * Enter execution mode.
1849 */
1850 PATMRawEnter(pVM, pCtxCore);
1851
1852 /*
1853 * Set CPL to Ring-1.
1854 */
1855 pCtxCore->ss |= 1;
1856 if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
1857 pCtxCore->cs |= 1;
1858 }
1859 else
1860 {
1861 AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
1862 ("ring-1 code not supported\n"));
1863 /*
1864 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
1865 */
1866 PATMRawEnter(pVM, pCtxCore);
1867 }
1868
1869 /*
1870 * Assert sanity.
1871 */
1872 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
1873 AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
1874 || pCtxCore->eflags.Bits.u1VM,
1875 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1876 Assert((pCpumCpu->Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
1877 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
1878
1879 pVM->cpum.s.fRawEntered = true;
1880 return VINF_SUCCESS;
1881}
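/*
 * Illustrative sketch, not part of the original file: the raw-mode round
 * trip. CPUMRawEnter() pushes guest ring-0 selectors to RPL 1 and
 * CPUMRawLeave() strips the RPL again when execution returns.
 */
#if 0 /* illustrative only */
    int rc = CPUMRawEnter(pVM, NULL);       /* ring compression on */
    /* ... run raw-mode / recompiled guest code ... */
    rc = CPUMRawLeave(pVM, NULL, rc);       /* ring compression off */
#endif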
1882
1883
1884/**
1885 * Transforms the guest CPU state from raw-ring mode to correct values.
1886 *
1887 * This function will change any selector registers with DPL=1 to DPL=0.
1888 *
1889 * @returns Adjusted rc.
1890 * @param pVM VM handle.
1891 * @param rc Raw mode return code
1892 * @param pCtxCore The context core (for trap usage).
1893 * @see @ref pg_raw
1894 */
1895VMMDECL(int) CPUMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rc)
1896{
1897 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1898
1899 /*
1900 * Don't leave if we've already left (in GC).
1901 */
1902 Assert(pVM->cpum.s.fRawEntered);
1903 if (!pVM->cpum.s.fRawEntered)
1904 return rc;
1905 pVM->cpum.s.fRawEntered = false;
1906
1907 PCPUMCTX pCtx = &pCpumCpu->Guest;
1908 if (!pCtxCore)
1909 pCtxCore = CPUMCTX2CORE(pCtx);
1910 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
1911 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
1912 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1913
1914 /*
1915 * Are we executing in raw ring-1?
1916 */
1917 if ( (pCtxCore->ss & X86_SEL_RPL) == 1
1918 && !pCtxCore->eflags.Bits.u1VM)
1919 {
1920 /*
1921 * Leave execution mode.
1922 */
1923 PATMRawLeave(pVM, pCtxCore, rc);
1924 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
1925 /** @todo See what happens if we remove this. */
1926 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1927 pCtxCore->ds &= ~X86_SEL_RPL;
1928 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1929 pCtxCore->es &= ~X86_SEL_RPL;
1930 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1931 pCtxCore->fs &= ~X86_SEL_RPL;
1932 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1933 pCtxCore->gs &= ~X86_SEL_RPL;
1934
1935 /*
1936 * Ring-1 selector => Ring-0.
1937 */
1938 pCtxCore->ss &= ~X86_SEL_RPL;
1939 if ((pCtxCore->cs & X86_SEL_RPL) == 1)
1940 pCtxCore->cs &= ~X86_SEL_RPL;
1941 }
1942 else
1943 {
1944 /*
1945 * PATM is taking care of the IOPL and IF flags for us.
1946 */
1947 PATMRawLeave(pVM, pCtxCore, rc);
1948 if (!pCtxCore->eflags.Bits.u1VM)
1949 {
1950 /** @todo See what happens if we remove this. */
1951 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1952 pCtxCore->ds &= ~X86_SEL_RPL;
1953 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1954 pCtxCore->es &= ~X86_SEL_RPL;
1955 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1956 pCtxCore->fs &= ~X86_SEL_RPL;
1957 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1958 pCtxCore->gs &= ~X86_SEL_RPL;
1959 }
1960 }
1961
1962 return rc;
1963}
1964
1965/**
1966 * Updates the EFLAGS while we're in raw-mode.
1967 *
1968 * @param pVM The VM handle.
1969 * @param pCtxCore The context core.
1970 * @param eflags The new EFLAGS value.
1971 */
1972VMMDECL(void) CPUMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t eflags)
1973{
1974 if (!pVM->cpum.s.fRawEntered)
1975 {
1976 pCtxCore->eflags.u32 = eflags;
1977 return;
1978 }
1979 PATMRawSetEFlags(pVM, pCtxCore, eflags);
1980}
1981
1982#endif /* !IN_RING0 */
1983
1984/**
1985 * Gets the EFLAGS while we're in raw-mode.
1986 *
1987 * @returns The eflags.
1988 * @param pVM The VM handle.
1989 * @param pCtxCore The context core.
1990 */
1991VMMDECL(uint32_t) CPUMRawGetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore)
1992{
1993#ifdef IN_RING0
1994 return pCtxCore->eflags.u32;
1995#else
1996 if (!pVM->cpum.s.fRawEntered)
1997 return pCtxCore->eflags.u32;
1998 return PATMRawGetEFlags(pVM, pCtxCore);
1999#endif
2000}
2001
2002
2003/**
2004 * Gets and resets the changed flags (CPUM_CHANGED_*).
2005 * Only REM should call this function.
2006 *
2007 * @returns The changed flags.
2008 * @param pVM The VM handle.
2009 */
2010VMMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVM pVM)
2011{
2012 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2013
2014 unsigned fFlags = pCpumCpu->fChanged;
2015 pCpumCpu->fChanged = 0;
2016 /** @todo change the switcher to use the fChanged flags. */
2017 if (pCpumCpu->fUseFlags & CPUM_USED_FPU_SINCE_REM)
2018 {
2019 fFlags |= CPUM_CHANGED_FPU_REM;
2020 pCpumCpu->fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
2021 }
2022 return fFlags;
2023}
2024
2025
2026/**
2027 * Sets the specified changed flags (CPUM_CHANGED_*).
2028 *
2029 * @param pVM The VM handle.
2030 */
2031VMMDECL(void) CPUMSetChangedFlags(PVM pVM, uint32_t fChangedFlags)
2032{
2033 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2034
2035 pCpumCpu->fChanged |= fChangedFlags;
2036}
2037
2038
2039/**
2040 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
2041 * @returns true if supported.
2042 * @returns false if not supported.
2043 * @param pVM The VM handle.
2044 */
2045VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2046{
2047 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2048}
2049
2050
2051/**
2052 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2053 * @returns true if used.
2054 * @returns false if not used.
2055 * @param pVM The VM handle.
2056 */
2057VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2058{
2059 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2060
2061 return (pCpumCpu->fUseFlags & CPUM_USE_SYSENTER) != 0;
2062}
2063
2064
2065/**
2066 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2067 * @returns true if used.
2068 * @returns false if not used.
2069 * @param pVM The VM handle.
2070 */
2071VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2072{
2073 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2074
2075 return (pCpumCpu->fUseFlags & CPUM_USE_SYSCALL) != 0;
2076}
2077
2078#ifndef IN_RING3
2079
2080/**
2081 * Lazily syncs in the FPU/XMM state.
2082 *
2083 * @returns VBox status code.
2084 * @param pVM VM handle.
2085 * @param pVCpu VMCPU handle
2086 */
2087VMMDECL(int) CPUMHandleLazyFPU(PVM pVM, PVMCPU pVCpu)
2088{
2089 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2090}
2091
2092#endif /* !IN_RING3 */
2093
2094/**
2095 * Checks if we activated the FPU/XMM state of the guest OS
2096 * @returns true if we did.
2097 * @returns false if not.
2098 * @param pVCpu The VMCPU handle.
2099 */
2100VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2101{
2102 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2103}
2104
2105
2106/**
2107 * Deactivate the FPU/XMM state of the guest OS
2108 * @param pVM The VM handle.
2109 */
2110VMMDECL(void) CPUMDeactivateGuestFPUState(PVM pVM)
2111{
2112 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2113
2114 pCpumCpu->fUseFlags &= ~CPUM_USED_FPU;
2115}
2116
2117
2118/**
2119 * Checks if the guest debug state is active
2120 *
2121 * @returns boolean
2122 * @param pVM VM handle.
2123 */
2124VMMDECL(bool) CPUMIsGuestDebugStateActive(PVM pVM)
2125{
2126 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2127
2128 return (pCpumCpu->fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2129}
2130
2131
2132/**
2133 * Marks the guest's debug state as inactive.
2134 *
2136 * @param pVM VM handle.
2137 */
2138VMMDECL(void) CPUMDeactivateGuestDebugState(PVM pVM)
2139{
2140 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2141
2142 pCpumCpu->fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2143}
2144
2145
2146/**
2147 * Checks if the hidden selector registers are valid
2148 * @returns true if they are.
2149 * @returns false if not.
2150 * @param pVM The VM handle.
2151 */
2152VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
2153{
2154 return !!pVM->cpum.s.fValidHiddenSelRegs; /** @todo change fValidHiddenSelRegs to bool! */
2155}
2156
2157
2158/**
2159 * Sets whether the hidden selector registers are valid.
2160 * @param pVM The VM handle.
2161 * @param fValid Valid or not
2162 */
2163VMMDECL(void) CPUMSetHiddenSelRegsValid(PVM pVM, bool fValid)
2164{
2165 pVM->cpum.s.fValidHiddenSelRegs = fValid;
2166}
2167
2168
2169/**
2170 * Get the current privilege level of the guest.
2171 *
2172 * @returns cpl
2173 * @param pVM VM Handle.
2174 * @param pCtxCore Trap register frame.
2175 */
2176VMMDECL(uint32_t) CPUMGetGuestCPL(PVM pVM, PCPUMCTXCORE pCtxCore)
2177{
2178 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2179 uint32_t cpl;
2180
2181 if (CPUMAreHiddenSelRegsValid(pVM))
2182 {
2183 /*
2184 * The hidden CS.DPL register is always equal to the CPL; it is
2185 * not affected by loading a conforming code segment.
2186 *
2187 * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
2188 * at SS. (ACP2 regression during install after a far call to ring 2)
2189 */
2190 if (RT_LIKELY(pCpumCpu->Guest.cr0 & X86_CR0_PE))
2191 cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
2192 else
2193 cpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2194 }
2195 else if (RT_LIKELY(pCpumCpu->Guest.cr0 & X86_CR0_PE))
2196 {
2197 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
2198 {
2199 /*
2200 * The SS RPL is always equal to the CPL, while the CS RPL
2201 * isn't necessarily equal if the segment is conforming.
2202 * See section 4.11.1 in the AMD manual.
2203 */
2204 cpl = (pCtxCore->ss & X86_SEL_RPL);
2205#ifndef IN_RING0
2206 if (cpl == 1)
2207 cpl = 0;
2208#endif
2209 }
2210 else
2211 cpl = 3;
2212 }
2213 else
2214 cpl = 0; /* real mode; cpl is zero */
2215
2216 return cpl;
2217}
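/*
 * Worked example (illustrative, not part of the original file) of the
 * fallback path above: with hidden registers invalid and CR0.PE set,
 * ss = 0x1b (RPL 3) yields CPL 3, while ss = 0x09 (RPL 1) yields CPL 0 in
 * non-ring-0 builds because raw-mode runs guest ring-0 code at ring 1.
 */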
2218
2219
2220/**
2221 * Gets the current guest CPU mode.
2222 *
2223 * If paging mode is what you need, check out PGMGetGuestMode().
2224 *
2225 * @returns The CPU mode.
2226 * @param pVM The VM handle.
2227 */
2228VMMDECL(CPUMMODE) CPUMGetGuestMode(PVM pVM)
2229{
2230 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2231
2232 CPUMMODE enmMode;
2233 if (!(pCpumCpu->Guest.cr0 & X86_CR0_PE))
2234 enmMode = CPUMMODE_REAL;
2235 else if (!(pCpumCpu->Guest.msrEFER & MSR_K6_EFER_LMA))
2236 enmMode = CPUMMODE_PROTECTED;
2237 else
2238 enmMode = CPUMMODE_LONG;
2239
2240 return enmMode;
2241}
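/*
 * Illustrative summary, not part of the original file: the decision above
 * maps CR0.PE = 0 to CPUMMODE_REAL, CR0.PE = 1 with EFER.LMA clear to
 * CPUMMODE_PROTECTED, and EFER.LMA set to CPUMMODE_LONG.
 */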
2242