VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 13960

Last change on this file since 13960 was 13960, checked in by vboxsync, 16 years ago

Moved guest and host CPU contexts into per-VCPU array.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 54.3 KB
1/* $Id: CPUMAllRegs.cpp 13960 2008-11-07 13:04:45Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_CPUM
27#include <VBox/cpum.h>
28#include <VBox/patm.h>
29#include <VBox/dbgf.h>
30#include <VBox/mm.h>
31#include "CPUMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/err.h>
34#include <VBox/dis.h>
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#ifdef IN_RING3
39#include <iprt/thread.h>
40#endif
41
42/** Keep stack frame pointers here (turns off MSC's /Oy frame-pointer omission). */
43#if defined(_MSC_VER) && !defined(DEBUG)
44# pragma optimize("y", off)
45#endif
46
47
48/**
49 * Sets or resets an alternative hypervisor context core.
50 *
51 * This is called when we get a hypervisor trap, to switch the context
52 * core with the trap frame on the stack. It is called again to reset
53 * back to the default context core when resuming hypervisor execution.
54 *
55 * @param pVM The VM handle.
56 * @param pCtxCore Pointer to the alternative context core or NULL
57 * to go back to the default context core.
58 */
59VMMDECL(void) CPUMHyperSetCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
60{
61    LogFlow(("CPUMHyperSetCtxCore: %p -> %p\n", pVM->cpum.s.CTX_SUFF(pHyperCore), pCtxCore));
62 if (!pCtxCore)
63 {
64 pCtxCore = CPUMCTX2CORE(&pVM->cpum.s.Hyper);
65 pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))VM_R3_ADDR(pVM, pCtxCore);
66 pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))VM_R0_ADDR(pVM, pCtxCore);
67 pVM->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))VM_RC_ADDR(pVM, pCtxCore);
68 }
69 else
70 {
71 pVM->cpum.s.pHyperCoreR3 = (R3PTRTYPE(PCPUMCTXCORE))MMHyperCCToR3(pVM, pCtxCore);
72 pVM->cpum.s.pHyperCoreR0 = (R0PTRTYPE(PCPUMCTXCORE))MMHyperCCToR0(pVM, pCtxCore);
73 pVM->cpum.s.pHyperCoreRC = (RCPTRTYPE(PCPUMCTXCORE))MMHyperCCToRC(pVM, pCtxCore);
74 }
75}
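/* A minimal usage sketch, assuming a hypothetical trap handler that has its
 * trap frame in pTrapCtxCore:
 *
 *     CPUMHyperSetCtxCore(pVM, pTrapCtxCore);   // switch to the frame on the stack
 *     uint32_t uTrapEip = CPUMGetHyperEIP(pVM); // getters now read the trap frame
 *     CPUMHyperSetCtxCore(pVM, NULL);           // back to the default context core
 */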
76
77
78/**
79 * Gets the pointer to the internal CPUMCTXCORE structure for the hypervisor.
80 * This is only for reading in order to save a few calls.
81 *
82 * @param pVM Handle to the virtual machine.
83 */
84VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVM pVM)
85{
86 return pVM->cpum.s.CTX_SUFF(pHyperCore);
87}
88
89
90/**
91 * Queries the pointer to the internal CPUMCTX structure for the hypervisor.
92 *
93 * @returns VBox status code.
94 * @param pVM Handle to the virtual machine.
95 * @param ppCtx Receives the hyper CPUMCTX pointer when successful.
96 *
97 * @deprecated This will *not* (and never has) give the right picture of the
98 * hypervisor register state. With CPUMHyperSetCtxCore() this is
99 * getting much worse. So, use the individual functions for getting
100 * and especially setting the hypervisor registers.
101 */
102VMMDECL(int) CPUMQueryHyperCtxPtr(PVM pVM, PCPUMCTX *ppCtx)
103{
104 *ppCtx = &pVM->cpum.s.Hyper;
105 return VINF_SUCCESS;
106}
107
108
109VMMDECL(void) CPUMSetHyperGDTR(PVM pVM, uint32_t addr, uint16_t limit)
110{
111 pVM->cpum.s.Hyper.gdtr.cbGdt = limit;
112 pVM->cpum.s.Hyper.gdtr.pGdt = addr;
113 pVM->cpum.s.Hyper.gdtrPadding = 0;
114}
115
116
117VMMDECL(void) CPUMSetHyperIDTR(PVM pVM, uint32_t addr, uint16_t limit)
118{
119 pVM->cpum.s.Hyper.idtr.cbIdt = limit;
120 pVM->cpum.s.Hyper.idtr.pIdt = addr;
121 pVM->cpum.s.Hyper.idtrPadding = 0;
122}
123
124
125VMMDECL(void) CPUMSetHyperCR3(PVM pVM, uint32_t cr3)
126{
127 pVM->cpum.s.Hyper.cr3 = cr3;
128}
129
130
131VMMDECL(void) CPUMSetHyperCS(PVM pVM, RTSEL SelCS)
132{
133 pVM->cpum.s.CTX_SUFF(pHyperCore)->cs = SelCS;
134}
135
136
137VMMDECL(void) CPUMSetHyperDS(PVM pVM, RTSEL SelDS)
138{
139 pVM->cpum.s.CTX_SUFF(pHyperCore)->ds = SelDS;
140}
141
142
143VMMDECL(void) CPUMSetHyperES(PVM pVM, RTSEL SelES)
144{
145 pVM->cpum.s.CTX_SUFF(pHyperCore)->es = SelES;
146}
147
148
149VMMDECL(void) CPUMSetHyperFS(PVM pVM, RTSEL SelFS)
150{
151 pVM->cpum.s.CTX_SUFF(pHyperCore)->fs = SelFS;
152}
153
154
155VMMDECL(void) CPUMSetHyperGS(PVM pVM, RTSEL SelGS)
156{
157 pVM->cpum.s.CTX_SUFF(pHyperCore)->gs = SelGS;
158}
159
160
161VMMDECL(void) CPUMSetHyperSS(PVM pVM, RTSEL SelSS)
162{
163 pVM->cpum.s.CTX_SUFF(pHyperCore)->ss = SelSS;
164}
165
166
167VMMDECL(void) CPUMSetHyperESP(PVM pVM, uint32_t u32ESP)
168{
169 pVM->cpum.s.CTX_SUFF(pHyperCore)->esp = u32ESP;
170}
171
172
173VMMDECL(int) CPUMSetHyperEFlags(PVM pVM, uint32_t Efl)
174{
175 pVM->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32 = Efl;
176 return VINF_SUCCESS;
177}
178
179
180VMMDECL(void) CPUMSetHyperEIP(PVM pVM, uint32_t u32EIP)
181{
182 pVM->cpum.s.CTX_SUFF(pHyperCore)->eip = u32EIP;
183}
184
185
186VMMDECL(void) CPUMSetHyperTR(PVM pVM, RTSEL SelTR)
187{
188 pVM->cpum.s.Hyper.tr = SelTR;
189}
190
191
192VMMDECL(void) CPUMSetHyperLDTR(PVM pVM, RTSEL SelLDTR)
193{
194 pVM->cpum.s.Hyper.ldtr = SelLDTR;
195}
196
197
198VMMDECL(void) CPUMSetHyperDR0(PVM pVM, RTGCUINTREG uDr0)
199{
200 pVM->cpum.s.Hyper.dr[0] = uDr0;
201 /** @todo in GC we must load it! */
202}
203
204
205VMMDECL(void) CPUMSetHyperDR1(PVM pVM, RTGCUINTREG uDr1)
206{
207 pVM->cpum.s.Hyper.dr[1] = uDr1;
208 /** @todo in GC we must load it! */
209}
210
211
212VMMDECL(void) CPUMSetHyperDR2(PVM pVM, RTGCUINTREG uDr2)
213{
214 pVM->cpum.s.Hyper.dr[2] = uDr2;
215 /** @todo in GC we must load it! */
216}
217
218
219VMMDECL(void) CPUMSetHyperDR3(PVM pVM, RTGCUINTREG uDr3)
220{
221 pVM->cpum.s.Hyper.dr[3] = uDr3;
222 /** @todo in GC we must load it! */
223}
224
225
226VMMDECL(void) CPUMSetHyperDR6(PVM pVM, RTGCUINTREG uDr6)
227{
228 pVM->cpum.s.Hyper.dr[6] = uDr6;
229 /** @todo in GC we must load it! */
230}
231
232
233VMMDECL(void) CPUMSetHyperDR7(PVM pVM, RTGCUINTREG uDr7)
234{
235 pVM->cpum.s.Hyper.dr[7] = uDr7;
236 /** @todo in GC we must load it! */
237}
238
239
240VMMDECL(RTSEL) CPUMGetHyperCS(PVM pVM)
241{
242 return pVM->cpum.s.CTX_SUFF(pHyperCore)->cs;
243}
244
245
246VMMDECL(RTSEL) CPUMGetHyperDS(PVM pVM)
247{
248 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ds;
249}
250
251
252VMMDECL(RTSEL) CPUMGetHyperES(PVM pVM)
253{
254 return pVM->cpum.s.CTX_SUFF(pHyperCore)->es;
255}
256
257
258VMMDECL(RTSEL) CPUMGetHyperFS(PVM pVM)
259{
260 return pVM->cpum.s.CTX_SUFF(pHyperCore)->fs;
261}
262
263
264VMMDECL(RTSEL) CPUMGetHyperGS(PVM pVM)
265{
266 return pVM->cpum.s.CTX_SUFF(pHyperCore)->gs;
267}
268
269
270VMMDECL(RTSEL) CPUMGetHyperSS(PVM pVM)
271{
272 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ss;
273}
274
275
276VMMDECL(uint32_t) CPUMGetHyperEAX(PVM pVM)
277{
278 return pVM->cpum.s.CTX_SUFF(pHyperCore)->eax;
279}
280
281
282VMMDECL(uint32_t) CPUMGetHyperEBX(PVM pVM)
283{
284 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ebx;
285}
286
287
288VMMDECL(uint32_t) CPUMGetHyperECX(PVM pVM)
289{
290 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ecx;
291}
292
293
294VMMDECL(uint32_t) CPUMGetHyperEDX(PVM pVM)
295{
296 return pVM->cpum.s.CTX_SUFF(pHyperCore)->edx;
297}
298
299
300VMMDECL(uint32_t) CPUMGetHyperESI(PVM pVM)
301{
302 return pVM->cpum.s.CTX_SUFF(pHyperCore)->esi;
303}
304
305
306VMMDECL(uint32_t) CPUMGetHyperEDI(PVM pVM)
307{
308 return pVM->cpum.s.CTX_SUFF(pHyperCore)->edi;
309}
310
311
312VMMDECL(uint32_t) CPUMGetHyperEBP(PVM pVM)
313{
314 return pVM->cpum.s.CTX_SUFF(pHyperCore)->ebp;
315}
316
317
318VMMDECL(uint32_t) CPUMGetHyperESP(PVM pVM)
319{
320 return pVM->cpum.s.CTX_SUFF(pHyperCore)->esp;
321}
322
323
324VMMDECL(uint32_t) CPUMGetHyperEFlags(PVM pVM)
325{
326 return pVM->cpum.s.CTX_SUFF(pHyperCore)->eflags.u32;
327}
328
329
330VMMDECL(uint32_t) CPUMGetHyperEIP(PVM pVM)
331{
332 return pVM->cpum.s.CTX_SUFF(pHyperCore)->eip;
333}
334
335
336VMMDECL(uint64_t) CPUMGetHyperRIP(PVM pVM)
337{
338 return pVM->cpum.s.CTX_SUFF(pHyperCore)->rip;
339}
340
341
342VMMDECL(uint32_t) CPUMGetHyperIDTR(PVM pVM, uint16_t *pcbLimit)
343{
344 if (pcbLimit)
345 *pcbLimit = pVM->cpum.s.Hyper.idtr.cbIdt;
346 return pVM->cpum.s.Hyper.idtr.pIdt;
347}
348
349
350VMMDECL(uint32_t) CPUMGetHyperGDTR(PVM pVM, uint16_t *pcbLimit)
351{
352 if (pcbLimit)
353 *pcbLimit = pVM->cpum.s.Hyper.gdtr.cbGdt;
354 return pVM->cpum.s.Hyper.gdtr.pGdt;
355}
356
357
358VMMDECL(RTSEL) CPUMGetHyperLDTR(PVM pVM)
359{
360 return pVM->cpum.s.Hyper.ldtr;
361}
362
363
364VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVM pVM)
365{
366 return pVM->cpum.s.Hyper.dr[0];
367}
368
369
370VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVM pVM)
371{
372 return pVM->cpum.s.Hyper.dr[1];
373}
374
375
376VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVM pVM)
377{
378 return pVM->cpum.s.Hyper.dr[2];
379}
380
381
382VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVM pVM)
383{
384 return pVM->cpum.s.Hyper.dr[3];
385}
386
387
388VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVM pVM)
389{
390 return pVM->cpum.s.Hyper.dr[6];
391}
392
393
394VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVM pVM)
395{
396 return pVM->cpum.s.Hyper.dr[7];
397}
398
399
400/**
401 * Gets the pointer to the internal CPUMCTXCORE structure.
402 * This is only for reading in order to save a few calls.
403 *
404 * @param pVM Handle to the virtual machine.
405 */
406VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVM pVM)
407{
408 VM_ASSERT_EMT(pVM);
409 return CPUMCTX2CORE(&pVM->aCpus[VMMGetCpuId(pVM)].cpum.s.Guest);
410}
411
412
413/**
414 * Sets the guest context core registers.
415 *
416 * @param pVM Handle to the virtual machine.
417 * @param pCtxCore The new context core values.
418 */
419VMMDECL(void) CPUMSetGuestCtxCore(PVM pVM, PCCPUMCTXCORE pCtxCore)
420{
421 /** @todo #1410 requires selectors to be checked. (huh? 1410?) */
422
423 PCPUMCTXCORE pCtxCoreDst = CPUMCTX2CORE(&pVM->aCpus[VMMGetCpuId(pVM)].cpum.s.Guest);
424 *pCtxCoreDst = *pCtxCore;
425
426 /* Mask away invalid parts of the cpu context. */
427 if (!CPUMIsGuestInLongMode(pVM))
428 {
429 uint64_t u64Mask = UINT64_C(0xffffffff);
430
431 pCtxCoreDst->rip &= u64Mask;
432 pCtxCoreDst->rax &= u64Mask;
433 pCtxCoreDst->rbx &= u64Mask;
434 pCtxCoreDst->rcx &= u64Mask;
435 pCtxCoreDst->rdx &= u64Mask;
436 pCtxCoreDst->rsi &= u64Mask;
437 pCtxCoreDst->rdi &= u64Mask;
438 pCtxCoreDst->rbp &= u64Mask;
439 pCtxCoreDst->rsp &= u64Mask;
440 pCtxCoreDst->rflags.u &= u64Mask;
441
442 pCtxCoreDst->r8 = 0;
443 pCtxCoreDst->r9 = 0;
444 pCtxCoreDst->r10 = 0;
445 pCtxCoreDst->r11 = 0;
446 pCtxCoreDst->r12 = 0;
447 pCtxCoreDst->r13 = 0;
448 pCtxCoreDst->r14 = 0;
449 pCtxCoreDst->r15 = 0;
450 }
451}
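/* A worked example of the masking above: outside long mode only the low
 * 32 bits of RIP and the GPRs are architecturally visible, so a copy of
 *
 *     pCtxCore->rip = UINT64_C(0x1234567800401000);
 *
 * ends up as 0x0000000000401000 and r8..r15 are cleared outright.
 */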
452
453
454/**
455 * Queries the pointer to the internal CPUMCTX structure
456 *
457 * @returns The CPUMCTX pointer.
458 * @param pVM Handle to the virtual machine.
459 */
460VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVM pVM)
461{
462 return &pVM->aCpus[VMMGetCpuId(pVM)].cpum.s.Guest;
463}
464
465static PCPUMCPU cpumGetCpumCpu(PVM pVM)
466{
467 RTCPUID idCpu = VMMGetCpuId(pVM);
468
469 return &pVM->aCpus[idCpu].cpum.s;
470}
471
472VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtrEx(PVM pVM, PVMCPU pVCpu)
473{
474 Assert(pVCpu->idCpu < pVM->cCPUs);
475 return &pVCpu->cpum.s.Guest;
476}
477
478VMMDECL(int) CPUMSetGuestGDTR(PVM pVM, uint32_t addr, uint16_t limit)
479{
480 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
481
482 pCpumCpu->Guest.gdtr.cbGdt = limit;
483 pCpumCpu->Guest.gdtr.pGdt = addr;
484 pCpumCpu->fChanged |= CPUM_CHANGED_GDTR;
485 return VINF_SUCCESS;
486}
487
488VMMDECL(int) CPUMSetGuestIDTR(PVM pVM, uint32_t addr, uint16_t limit)
489{
490 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
491
492 pCpumCpu->Guest.idtr.cbIdt = limit;
493 pCpumCpu->Guest.idtr.pIdt = addr;
494 pCpumCpu->fChanged |= CPUM_CHANGED_IDTR;
495 return VINF_SUCCESS;
496}
497
498VMMDECL(int) CPUMSetGuestTR(PVM pVM, uint16_t tr)
499{
500 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
501
502 pCpumCpu->Guest.tr = tr;
503 pCpumCpu->fChanged |= CPUM_CHANGED_TR;
504 return VINF_SUCCESS;
505}
506
507VMMDECL(int) CPUMSetGuestLDTR(PVM pVM, uint16_t ldtr)
508{
509 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
510
511 pCpumCpu->Guest.ldtr = ldtr;
512 pCpumCpu->fChanged |= CPUM_CHANGED_LDTR;
513 return VINF_SUCCESS;
514}
515
516
517/**
518 * Set the guest CR0.
519 *
520 * When called in GC, the hyper CR0 may be updated if that is
521 * required. The caller only has to take special action if AM,
522 * WP, PG or PE changes.
523 *
524 * @returns VINF_SUCCESS (consider it void).
525 * @param pVM Pointer to the shared VM structure.
526 * @param cr0 The new CR0 value.
527 */
528VMMDECL(int) CPUMSetGuestCR0(PVM pVM, uint64_t cr0)
529{
530 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
531
532#ifdef IN_RC
533 /*
534 * Check if we need to change hypervisor CR0 because
535 * of math stuff.
536 */
537 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
538 != (pCpumCpu->Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
539 {
540 if (!(pCpumCpu->fUseFlags & CPUM_USED_FPU))
541 {
542 /*
543 * We haven't saved the host FPU state yet, so TS and MT are both set
544 * and EM should be reflecting the guest EM (it always does this).
545 */
546 if ((cr0 & X86_CR0_EM) != (pCpumCpu->Guest.cr0 & X86_CR0_EM))
547 {
548 uint32_t HyperCR0 = ASMGetCR0();
549 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
550 AssertMsg((HyperCR0 & X86_CR0_EM) == (pCpumCpu->Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
551 HyperCR0 &= ~X86_CR0_EM;
552 HyperCR0 |= cr0 & X86_CR0_EM;
553 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
554 ASMSetCR0(HyperCR0);
555 }
556# ifdef VBOX_STRICT
557 else
558 {
559 uint32_t HyperCR0 = ASMGetCR0();
560 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
561 AssertMsg((HyperCR0 & X86_CR0_EM) == (pCpumCpu->Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
562 }
563# endif
564 }
565 else
566 {
567 /*
568 * Already saved the state, so we're just mirroring
569 * the guest flags.
570 */
571 uint32_t HyperCR0 = ASMGetCR0();
572 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
573 == (pCpumCpu->Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
574 ("%#x %#x\n", HyperCR0, pCpumCpu->Guest.cr0));
575 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
576 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
577 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
578 ASMSetCR0(HyperCR0);
579 }
580 }
581#endif /* IN_RC */
582
583 /*
584 * Check for changes causing TLB flushes (for REM).
585 * The caller is responsible for calling PGM when appropriate.
586 */
587 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
588 != (pCpumCpu->Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
589 pCpumCpu->fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
590 pCpumCpu->fChanged |= CPUM_CHANGED_CR0;
591
592 pCpumCpu->Guest.cr0 = cr0 | X86_CR0_ET;
593 return VINF_SUCCESS;
594}
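/* A condensed sketch of the IN_RC logic above: once the guest FPU state is
 * active (CPUM_USED_FPU set), the lazy-FPU bits of the real CR0 simply mirror
 * the guest values:
 *
 *     const uint32_t fFpuBits = X86_CR0_TS | X86_CR0_EM | X86_CR0_MP;
 *     uint32_t       uHyper   = (ASMGetCR0() & ~fFpuBits) | (cr0 & fFpuBits);
 *     ASMSetCR0(uHyper);
 */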
595
596
597VMMDECL(int) CPUMSetGuestCR2(PVM pVM, uint64_t cr2)
598{
599 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
600
601 pCpumCpu->Guest.cr2 = cr2;
602 return VINF_SUCCESS;
603}
604
605
606VMMDECL(int) CPUMSetGuestCR3(PVM pVM, uint64_t cr3)
607{
608 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
609
610 pCpumCpu->Guest.cr3 = cr3;
611 pCpumCpu->fChanged |= CPUM_CHANGED_CR3;
612 return VINF_SUCCESS;
613}
614
615
616VMMDECL(int) CPUMSetGuestCR4(PVM pVM, uint64_t cr4)
617{
618 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
619
620 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
621 != (pCpumCpu->Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
622 pCpumCpu->fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
623 pCpumCpu->fChanged |= CPUM_CHANGED_CR4;
624 if (!CPUMSupportsFXSR(pVM))
625 cr4 &= ~X86_CR4_OSFSXR;
626 pCpumCpu->Guest.cr4 = cr4;
627 return VINF_SUCCESS;
628}
629
630
631VMMDECL(int) CPUMSetGuestEFlags(PVM pVM, uint32_t eflags)
632{
633 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
634
635 pCpumCpu->Guest.eflags.u32 = eflags;
636 return VINF_SUCCESS;
637}
638
639
640VMMDECL(int) CPUMSetGuestEIP(PVM pVM, uint32_t eip)
641{
642 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
643
644 pCpumCpu->Guest.eip = eip;
645 return VINF_SUCCESS;
646}
647
648
649VMMDECL(int) CPUMSetGuestEAX(PVM pVM, uint32_t eax)
650{
651 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
652
653 pCpumCpu->Guest.eax = eax;
654 return VINF_SUCCESS;
655}
656
657
658VMMDECL(int) CPUMSetGuestEBX(PVM pVM, uint32_t ebx)
659{
660 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
661
662 pCpumCpu->Guest.ebx = ebx;
663 return VINF_SUCCESS;
664}
665
666
667VMMDECL(int) CPUMSetGuestECX(PVM pVM, uint32_t ecx)
668{
669 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
670
671 pCpumCpu->Guest.ecx = ecx;
672 return VINF_SUCCESS;
673}
674
675
676VMMDECL(int) CPUMSetGuestEDX(PVM pVM, uint32_t edx)
677{
678 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
679
680 pCpumCpu->Guest.edx = edx;
681 return VINF_SUCCESS;
682}
683
684
685VMMDECL(int) CPUMSetGuestESP(PVM pVM, uint32_t esp)
686{
687 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
688
689 pCpumCpu->Guest.esp = esp;
690 return VINF_SUCCESS;
691}
692
693
694VMMDECL(int) CPUMSetGuestEBP(PVM pVM, uint32_t ebp)
695{
696 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
697
698 pCpumCpu->Guest.ebp = ebp;
699 return VINF_SUCCESS;
700}
701
702
703VMMDECL(int) CPUMSetGuestESI(PVM pVM, uint32_t esi)
704{
705 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
706
707 pCpumCpu->Guest.esi = esi;
708 return VINF_SUCCESS;
709}
710
711
712VMMDECL(int) CPUMSetGuestEDI(PVM pVM, uint32_t edi)
713{
714 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
715
716 pCpumCpu->Guest.edi = edi;
717 return VINF_SUCCESS;
718}
719
720
721VMMDECL(int) CPUMSetGuestSS(PVM pVM, uint16_t ss)
722{
723 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
724
725 pCpumCpu->Guest.ss = ss;
726 return VINF_SUCCESS;
727}
728
729
730VMMDECL(int) CPUMSetGuestCS(PVM pVM, uint16_t cs)
731{
732 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
733
734 pCpumCpu->Guest.cs = cs;
735 return VINF_SUCCESS;
736}
737
738
739VMMDECL(int) CPUMSetGuestDS(PVM pVM, uint16_t ds)
740{
741 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
742
743 pCpumCpu->Guest.ds = ds;
744 return VINF_SUCCESS;
745}
746
747
748VMMDECL(int) CPUMSetGuestES(PVM pVM, uint16_t es)
749{
750 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
751
752 pCpumCpu->Guest.es = es;
753 return VINF_SUCCESS;
754}
755
756
757VMMDECL(int) CPUMSetGuestFS(PVM pVM, uint16_t fs)
758{
759 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
760
761 pCpumCpu->Guest.fs = fs;
762 return VINF_SUCCESS;
763}
764
765
766VMMDECL(int) CPUMSetGuestGS(PVM pVM, uint16_t gs)
767{
768 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
769
770 pCpumCpu->Guest.gs = gs;
771 return VINF_SUCCESS;
772}
773
774
775VMMDECL(void) CPUMSetGuestEFER(PVM pVM, uint64_t val)
776{
777 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
778
779 pCpumCpu->Guest.msrEFER = val;
780}
781
782
783VMMDECL(uint64_t) CPUMGetGuestMsr(PVM pVM, unsigned idMsr)
784{
785 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
786 uint64_t u64 = 0;
787
788 switch (idMsr)
789 {
790 case MSR_IA32_CR_PAT:
791 u64 = pCpumCpu->Guest.msrPAT;
792 break;
793
794 case MSR_IA32_SYSENTER_CS:
795 u64 = pCpumCpu->Guest.SysEnter.cs;
796 break;
797
798 case MSR_IA32_SYSENTER_EIP:
799 u64 = pCpumCpu->Guest.SysEnter.eip;
800 break;
801
802 case MSR_IA32_SYSENTER_ESP:
803 u64 = pCpumCpu->Guest.SysEnter.esp;
804 break;
805
806 case MSR_K6_EFER:
807 u64 = pCpumCpu->Guest.msrEFER;
808 break;
809
810 case MSR_K8_SF_MASK:
811 u64 = pCpumCpu->Guest.msrSFMASK;
812 break;
813
814 case MSR_K6_STAR:
815 u64 = pCpumCpu->Guest.msrSTAR;
816 break;
817
818 case MSR_K8_LSTAR:
819 u64 = pCpumCpu->Guest.msrLSTAR;
820 break;
821
822 case MSR_K8_CSTAR:
823 u64 = pCpumCpu->Guest.msrCSTAR;
824 break;
825
826 case MSR_K8_KERNEL_GS_BASE:
827 u64 = pCpumCpu->Guest.msrKERNELGSBASE;
828 break;
829
830 /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
831 default:
832 AssertFailed();
833 break;
834 }
835 return u64;
836}
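/* A usage sketch, reading the guest EFER to see whether the guest has enabled
 * long mode (LME), as opposed to long mode merely being advertised:
 *
 *     uint64_t uEfer = CPUMGetGuestMsr(pVM, MSR_K6_EFER);
 *     bool     fLme  = RT_BOOL(uEfer & MSR_K6_EFER_LME);
 */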
837
838
839VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVM pVM, uint16_t *pcbLimit)
840{
841 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
842
843 if (pcbLimit)
844 *pcbLimit = pCpumCpu->Guest.idtr.cbIdt;
845 return pCpumCpu->Guest.idtr.pIdt;
846}
847
848
849VMMDECL(RTSEL) CPUMGetGuestTR(PVM pVM)
850{
851 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
852
853 return pCpumCpu->Guest.tr;
854}
855
856
857VMMDECL(RTSEL) CPUMGetGuestCS(PVM pVM)
858{
859 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
860
861 return pCpumCpu->Guest.cs;
862}
863
864
865VMMDECL(RTSEL) CPUMGetGuestDS(PVM pVM)
866{
867 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
868
869 return pCpumCpu->Guest.ds;
870}
871
872
873VMMDECL(RTSEL) CPUMGetGuestES(PVM pVM)
874{
875 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
876
877 return pCpumCpu->Guest.es;
878}
879
880
881VMMDECL(RTSEL) CPUMGetGuestFS(PVM pVM)
882{
883 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
884
885 return pCpumCpu->Guest.fs;
886}
887
888
889VMMDECL(RTSEL) CPUMGetGuestGS(PVM pVM)
890{
891 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
892
893 return pCpumCpu->Guest.gs;
894}
895
896
897VMMDECL(RTSEL) CPUMGetGuestSS(PVM pVM)
898{
899 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
900
901 return pCpumCpu->Guest.ss;
902}
903
904
905VMMDECL(RTSEL) CPUMGetGuestLDTR(PVM pVM)
906{
907 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
908
909 return pCpumCpu->Guest.ldtr;
910}
911
912
913VMMDECL(uint64_t) CPUMGetGuestCR0(PVM pVM)
914{
915 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
916
917 return pCpumCpu->Guest.cr0;
918}
919
920
921VMMDECL(uint64_t) CPUMGetGuestCR2(PVM pVM)
922{
923 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
924
925 return pCpumCpu->Guest.cr2;
926}
927
928
929VMMDECL(uint64_t) CPUMGetGuestCR3(PVM pVM)
930{
931 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
932
933 return pCpumCpu->Guest.cr3;
934}
935
936
937VMMDECL(uint64_t) CPUMGetGuestCR4(PVM pVM)
938{
939 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
940
941 return pCpumCpu->Guest.cr4;
942}
943
944
945VMMDECL(void) CPUMGetGuestGDTR(PVM pVM, PVBOXGDTR pGDTR)
946{
947 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
948
949 *pGDTR = pCpumCpu->Guest.gdtr;
950}
951
952
953VMMDECL(uint32_t) CPUMGetGuestEIP(PVM pVM)
954{
955 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
956
957 return pCpumCpu->Guest.eip;
958}
959
960
961VMMDECL(uint64_t) CPUMGetGuestRIP(PVM pVM)
962{
963 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
964
965 return pCpumCpu->Guest.rip;
966}
967
968
969VMMDECL(uint32_t) CPUMGetGuestEAX(PVM pVM)
970{
971 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
972
973 return pCpumCpu->Guest.eax;
974}
975
976
977VMMDECL(uint32_t) CPUMGetGuestEBX(PVM pVM)
978{
979 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
980
981 return pCpumCpu->Guest.ebx;
982}
983
984
985VMMDECL(uint32_t) CPUMGetGuestECX(PVM pVM)
986{
987 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
988
989 return pCpumCpu->Guest.ecx;
990}
991
992
993VMMDECL(uint32_t) CPUMGetGuestEDX(PVM pVM)
994{
995 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
996
997 return pCpumCpu->Guest.edx;
998}
999
1000
1001VMMDECL(uint32_t) CPUMGetGuestESI(PVM pVM)
1002{
1003 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1004
1005 return pCpumCpu->Guest.esi;
1006}
1007
1008
1009VMMDECL(uint32_t) CPUMGetGuestEDI(PVM pVM)
1010{
1011 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1012
1013 return pCpumCpu->Guest.edi;
1014}
1015
1016
1017VMMDECL(uint32_t) CPUMGetGuestESP(PVM pVM)
1018{
1019 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1020
1021 return pCpumCpu->Guest.esp;
1022}
1023
1024
1025VMMDECL(uint32_t) CPUMGetGuestEBP(PVM pVM)
1026{
1027 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1028
1029 return pCpumCpu->Guest.ebp;
1030}
1031
1032
1033VMMDECL(uint32_t) CPUMGetGuestEFlags(PVM pVM)
1034{
1035 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1036
1037 return pCpumCpu->Guest.eflags.u32;
1038}
1039
1040
1041VMMDECL(CPUMSELREGHID *) CPUMGetGuestTRHid(PVM pVM)
1042{
1043 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1044
1045 return &pCpumCpu->Guest.trHid;
1046}
1047
1048
1049/** @todo crx should be an array. */
1050VMMDECL(int) CPUMGetGuestCRx(PVM pVM, unsigned iReg, uint64_t *pValue)
1051{
1052 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1053
1054 switch (iReg)
1055 {
1056 case USE_REG_CR0:
1057 *pValue = pCpumCpu->Guest.cr0;
1058 break;
1059 case USE_REG_CR2:
1060 *pValue = pCpumCpu->Guest.cr2;
1061 break;
1062 case USE_REG_CR3:
1063 *pValue = pCpumCpu->Guest.cr3;
1064 break;
1065 case USE_REG_CR4:
1066 *pValue = pCpumCpu->Guest.cr4;
1067 break;
1068 default:
1069 return VERR_INVALID_PARAMETER;
1070 }
1071 return VINF_SUCCESS;
1072}
1073
1074
1075VMMDECL(uint64_t) CPUMGetGuestDR0(PVM pVM)
1076{
1077 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1078
1079 return pCpumCpu->Guest.dr[0];
1080}
1081
1082
1083VMMDECL(uint64_t) CPUMGetGuestDR1(PVM pVM)
1084{
1085 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1086
1087 return pCpumCpu->Guest.dr[1];
1088}
1089
1090
1091VMMDECL(uint64_t) CPUMGetGuestDR2(PVM pVM)
1092{
1093 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1094
1095 return pCpumCpu->Guest.dr[2];
1096}
1097
1098
1099VMMDECL(uint64_t) CPUMGetGuestDR3(PVM pVM)
1100{
1101 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1102
1103 return pCpumCpu->Guest.dr[3];
1104}
1105
1106
1107VMMDECL(uint64_t) CPUMGetGuestDR6(PVM pVM)
1108{
1109 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1110
1111 return pCpumCpu->Guest.dr[6];
1112}
1113
1114
1115VMMDECL(uint64_t) CPUMGetGuestDR7(PVM pVM)
1116{
1117 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1118
1119 return pCpumCpu->Guest.dr[7];
1120}
1121
1122
1123VMMDECL(int) CPUMGetGuestDRx(PVM pVM, uint32_t iReg, uint64_t *pValue)
1124{
1125 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1126
1127 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1128 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1129 if (iReg == 4 || iReg == 5)
1130 iReg += 2;
1131 *pValue = pCpumCpu->Guest.dr[iReg];
1132 return VINF_SUCCESS;
1133}
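/* Illustration of the aliasing above: DR4/DR5 are the architectural aliases
 * of DR6/DR7 (with CR4.DE clear), so index 4 returns the DR6 value:
 *
 *     uint64_t uDr6;
 *     CPUMGetGuestDRx(pVM, 4, &uDr6);   // same result as CPUMGetGuestDR6(pVM)
 */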
1134
1135
1136VMMDECL(uint64_t) CPUMGetGuestEFER(PVM pVM)
1137{
1138 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1139
1140 return pCpumCpu->Guest.msrEFER;
1141}
1142
1143
1144/**
1145 * Gets a CpuId leaf.
1146 *
1147 * @param pVM The VM handle.
1148 * @param iLeaf The CPUID leaf to get.
1149 * @param pEax Where to store the EAX value.
1150 * @param pEbx Where to store the EBX value.
1151 * @param pEcx Where to store the ECX value.
1152 * @param pEdx Where to store the EDX value.
1153 */
1154VMMDECL(void) CPUMGetGuestCpuId(PVM pVM, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1155{
1156 PCCPUMCPUID pCpuId;
1157 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1158 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1159 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1160 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1161 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1162 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1163 else
1164 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1165
1166 *pEax = pCpuId->eax;
1167 *pEbx = pCpuId->ebx;
1168 *pEcx = pCpuId->ecx;
1169 *pEdx = pCpuId->edx;
1170 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1171}
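/* A usage sketch, querying standard leaf 1 and testing a feature bit:
 *
 *     uint32_t uEax, uEbx, uEcx, uEdx;
 *     CPUMGetGuestCpuId(pVM, 1, &uEax, &uEbx, &uEcx, &uEdx);
 *     bool fGuestPae = RT_BOOL(uEdx & X86_CPUID_FEATURE_EDX_PAE);
 */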
1172
1173
1174/**
1175 * Gets a pointer to the array of standard CPUID leafs.
1176 *
1177 * CPUMGetGuestCpuIdStdMax() gives the size of the array.
1178 *
1179 * @returns Pointer to the standard CPUID leafs (read-only).
1180 * @param pVM The VM handle.
1181 * @remark Intended for PATM.
1182 */
1183VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdStdRCPtr(PVM pVM)
1184{
1185    return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
1186}
1187
1188
1189/**
1190 * Gets a pointer to the array of extended CPUID leafs.
1191 *
1192 * CPUMGetGuestCpuIdExtMax() gives the size of the array.
1193 *
1194 * @returns Pointer to the extended CPUID leafs (read-only).
1195 * @param pVM The VM handle.
1196 * @remark Intended for PATM.
1197 */
1198VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdExtRCPtr(PVM pVM)
1199{
1200 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
1201}
1202
1203
1204/**
1205 * Gets a pointer to the array of centaur CPUID leafs.
1206 *
1207 * CPUMGetGuestCpuIdCentaurMax() gives the size of the array.
1208 *
1209 * @returns Pointer to the centaur CPUID leafs (read-only).
1210 * @param pVM The VM handle.
1211 * @remark Intended for PATM.
1212 */
1213VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdCentaurRCPtr(PVM pVM)
1214{
1215 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
1216}
1217
1218
1219/**
1220 * Gets a pointer to the default CPUID leaf.
1221 *
1222 * @returns Pointer to the default CPUID leaf (read-only).
1223 * @param pVM The VM handle.
1224 * @remark Intended for PATM.
1225 */
1226VMMDECL(RCPTRTYPE(PCCPUMCPUID)) CPUMGetGuestCpuIdDefRCPtr(PVM pVM)
1227{
1228 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
1229}
1230
1231
1232/**
1233 * Gets the number of standard CPUID leafs.
1234 *
1235 * @returns Number of leafs.
1236 * @param pVM The VM handle.
1237 * @remark Intended for PATM.
1238 */
1239VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1240{
1241 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1242}
1243
1244
1245/**
1246 * Gets the number of extended CPUID leafs.
1247 *
1248 * @returns Number of leafs.
1249 * @param pVM The VM handle.
1250 * @remark Intended for PATM.
1251 */
1252VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1253{
1254 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1255}
1256
1257
1258/**
1259 * Gets the number of centaur CPUID leafs.
1260 *
1261 * @returns Number of leafs.
1262 * @param pVM The VM handle.
1263 * @remark Intended for PATM.
1264 */
1265VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1266{
1267 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1268}
1269
1270
1271/**
1272 * Sets a CPUID feature bit.
1273 *
1274 * @param pVM The VM Handle.
1275 * @param enmFeature The feature to set.
1276 */
1277VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1278{
1279 switch (enmFeature)
1280 {
1281 /*
1282 * Set the APIC bit in both feature masks.
1283 */
1284 case CPUMCPUIDFEATURE_APIC:
1285 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1286 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1287 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1288 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1289 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1290 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1291 break;
1292
1293 /*
1294 * Set the x2APIC bit in the standard feature mask.
1295 */
1296 case CPUMCPUIDFEATURE_X2APIC:
1297 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1298 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1299 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1300 break;
1301
1302 /*
1303 * Set the sysenter/sysexit bit in the standard feature mask.
1304 * Assumes the caller knows what it's doing! (host must support these)
1305 */
1306 case CPUMCPUIDFEATURE_SEP:
1307 {
1308 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1309 {
1310 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1311 return;
1312 }
1313
1314 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1315 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1316 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1317 break;
1318 }
1319
1320 /*
1321 * Set the syscall/sysret bit in the extended feature mask.
1322 * Assumes the caller knows what it's doing! (host must support these)
1323 */
1324 case CPUMCPUIDFEATURE_SYSCALL:
1325 {
1326 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1327 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
1328 {
1329 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1330 return;
1331 }
1332            /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
1333 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
1334 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1335 break;
1336 }
1337
1338 /*
1339 * Set the PAE bit in both feature masks.
1340 * Assumes the caller knows what it's doing! (host must support these)
1341 */
1342 case CPUMCPUIDFEATURE_PAE:
1343 {
1344 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1345 {
1346 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1347 return;
1348 }
1349
1350 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1351 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1352 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1353 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1354 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1355 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1356 break;
1357 }
1358
1359 /*
1360 * Set the LONG MODE bit in the extended feature mask.
1361 * Assumes the caller knows what it's doing! (host must support these)
1362 */
1363 case CPUMCPUIDFEATURE_LONG_MODE:
1364 {
1365 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1366 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1367 {
1368 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1369 return;
1370 }
1371
1372 /* Valid for both Intel and AMD. */
1373 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
1374 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1375 break;
1376 }
1377
1378 /*
1379 * Set the NXE bit in the extended feature mask.
1380 * Assumes the caller knows what it's doing! (host must support these)
1381 */
1382 case CPUMCPUIDFEATURE_NXE:
1383 {
1384 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1385 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
1386 {
1387 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
1388 return;
1389 }
1390
1391 /* Valid for both Intel and AMD. */
1392 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
1393 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
1394 break;
1395 }
1396
1397 case CPUMCPUIDFEATURE_LAHF:
1398 {
1399 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1400 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
1401 {
1402 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1403 return;
1404 }
1405
1406 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
1407 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1408 break;
1409 }
1410
1411 case CPUMCPUIDFEATURE_PAT:
1412 {
1413 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1414 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1415 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1416 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1417 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1418            LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAT\n"));
1419 break;
1420 }
1421
1422 default:
1423 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1424 break;
1425 }
1426 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1427
1428 pCpumCpu->fChanged |= CPUM_CHANGED_CPUID;
1429}
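/* A usage sketch, exposing PAE to the guest; checking host support first is
 * the caller's job for most of the feature bits handled above:
 *
 *     if (ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE)
 *         CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
 */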
1430
1431
1432/**
1433 * Queries a CPUID feature bit.
1434 *
1435 * @returns boolean for feature presence
1436 * @param pVM The VM Handle.
1437 * @param enmFeature The feature to query.
1438 */
1439VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1440{
1441 switch (enmFeature)
1442 {
1443 case CPUMCPUIDFEATURE_PAE:
1444 {
1445 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1446 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1447 break;
1448 }
1449
1450 default:
1451 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1452 break;
1453 }
1454 return false;
1455}
1456
1457
1458/**
1459 * Clears a CPUID feature bit.
1460 *
1461 * @param pVM The VM Handle.
1462 * @param enmFeature The feature to clear.
1463 */
1464VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1465{
1466 switch (enmFeature)
1467 {
1468 /*
1469 * Set the APIC bit in both feature masks.
1470 */
1471 case CPUMCPUIDFEATURE_APIC:
1472 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1473 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
1474 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1475 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1476 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1477            Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n"));
1478 break;
1479
1480 /*
1481 * Clear the x2APIC bit in the standard feature mask.
1482 */
1483 case CPUMCPUIDFEATURE_X2APIC:
1484 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1485 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1486            LogRel(("CPUMClearGuestCpuIdFeature: Disabled x2APIC\n"));
1487 break;
1488
1489 case CPUMCPUIDFEATURE_PAE:
1490 {
1491 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1492 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
1493 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1494 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1495 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1496 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
1497 break;
1498 }
1499
1500 case CPUMCPUIDFEATURE_PAT:
1501 {
1502 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1503 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
1504 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1505 && pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
1506 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1507 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
1508 break;
1509 }
1510
1511 default:
1512 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1513 break;
1514 }
1515 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1516 pCpumCpu->fChanged |= CPUM_CHANGED_CPUID;
1517}
1518
1519
1520/**
1521 * Gets the CPU vendor
1522 *
1523 * @returns CPU vendor
1524 * @param pVM The VM handle.
1525 */
1526VMMDECL(CPUMCPUVENDOR) CPUMGetCPUVendor(PVM pVM)
1527{
1528 return pVM->cpum.s.enmCPUVendor;
1529}
1530
1531
1532VMMDECL(int) CPUMSetGuestDR0(PVM pVM, uint64_t uDr0)
1533{
1534 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1535
1536 pCpumCpu->Guest.dr[0] = uDr0;
1537 return CPUMRecalcHyperDRx(pVM);
1538}
1539
1540
1541VMMDECL(int) CPUMSetGuestDR1(PVM pVM, uint64_t uDr1)
1542{
1543 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1544
1545 pCpumCpu->Guest.dr[1] = uDr1;
1546 return CPUMRecalcHyperDRx(pVM);
1547}
1548
1549
1550VMMDECL(int) CPUMSetGuestDR2(PVM pVM, uint64_t uDr2)
1551{
1552 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1553
1554 pCpumCpu->Guest.dr[2] = uDr2;
1555 return CPUMRecalcHyperDRx(pVM);
1556}
1557
1558
1559VMMDECL(int) CPUMSetGuestDR3(PVM pVM, uint64_t uDr3)
1560{
1561 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1562
1563 pCpumCpu->Guest.dr[3] = uDr3;
1564 return CPUMRecalcHyperDRx(pVM);
1565}
1566
1567
1568VMMDECL(int) CPUMSetGuestDR6(PVM pVM, uint64_t uDr6)
1569{
1570 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1571
1572 pCpumCpu->Guest.dr[6] = uDr6;
1573 return CPUMRecalcHyperDRx(pVM);
1574}
1575
1576
1577VMMDECL(int) CPUMSetGuestDR7(PVM pVM, uint64_t uDr7)
1578{
1579 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1580
1581 pCpumCpu->Guest.dr[7] = uDr7;
1582 return CPUMRecalcHyperDRx(pVM);
1583}
1584
1585
1586VMMDECL(int) CPUMSetGuestDRx(PVM pVM, uint32_t iReg, uint64_t Value)
1587{
1588 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1589
1590 AssertReturn(iReg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
1591 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1592 if (iReg == 4 || iReg == 5)
1593 iReg += 2;
1594 pCpumCpu->Guest.dr[iReg] = Value;
1595 return CPUMRecalcHyperDRx(pVM);
1596}
1597
1598
1599/**
1600 * Recalculates the hypervisor DRx register values based on
1601 * current guest registers and DBGF breakpoints.
1602 *
1603 * This is called whenever a guest DRx register is modified and when DBGF
1604 * sets a hardware breakpoint. In guest context this function will reload
1605 * any (hyper) DRx registers which comes out with a different value.
1606 *
1607 * @returns VINF_SUCCESS.
1608 * @param pVM The VM handle.
1609 */
1610VMMDECL(int) CPUMRecalcHyperDRx(PVM pVM)
1611{
1612 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1613 /*
1614 * Compare the DR7s first.
1615 *
1616 * We only care about the enabled flags. The GE and LE flags are always
1617 * set and we don't care if the guest doesn't set them. GD is virtualized
1618     * when we dispatch #DB; we never enable it.
1619 */
1620 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1621#ifdef CPUM_VIRTUALIZE_DRX
1622 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVM);
1623#else
1624 const RTGCUINTREG uGstDr7 = 0;
1625#endif
1626 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1627 {
1628 /*
1629 * Ok, something is enabled. Recalc each of the breakpoints.
1630         * Straightforward code, not optimized/minimized in any way.
1631 */
1632 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
1633
1634 /* bp 0 */
1635 RTGCUINTREG uNewDr0;
1636 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1637 {
1638 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1639 uNewDr0 = DBGFBpGetDR0(pVM);
1640 }
1641 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1642 {
1643 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1644 uNewDr0 = CPUMGetGuestDR0(pVM);
1645 }
1646 else
1647 uNewDr0 = pVM->cpum.s.Hyper.dr[0];
1648
1649 /* bp 1 */
1650 RTGCUINTREG uNewDr1;
1651 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1652 {
1653 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1654 uNewDr1 = DBGFBpGetDR1(pVM);
1655 }
1656 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1657 {
1658 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1659 uNewDr1 = CPUMGetGuestDR1(pVM);
1660 }
1661 else
1662 uNewDr1 = pVM->cpum.s.Hyper.dr[1];
1663
1664 /* bp 2 */
1665 RTGCUINTREG uNewDr2;
1666 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1667 {
1668 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1669 uNewDr2 = DBGFBpGetDR2(pVM);
1670 }
1671 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1672 {
1673 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1674 uNewDr2 = CPUMGetGuestDR2(pVM);
1675 }
1676 else
1677 uNewDr2 = pVM->cpum.s.Hyper.dr[2];
1678
1679 /* bp 3 */
1680 RTGCUINTREG uNewDr3;
1681 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1682 {
1683 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1684 uNewDr3 = DBGFBpGetDR3(pVM);
1685 }
1686 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1687 {
1688 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1689 uNewDr3 = CPUMGetGuestDR3(pVM);
1690 }
1691 else
1692 uNewDr3 = pVM->cpum.s.Hyper.dr[3];
1693
1694 /*
1695 * Apply the updates.
1696 */
1697#ifdef IN_RC
1698 if (!(pCpumCpu->fUseFlags & CPUM_USE_DEBUG_REGS))
1699 {
1700            /** @todo save host DRx registers. */
1701 }
1702#endif
1703 pCpumCpu->fUseFlags |= CPUM_USE_DEBUG_REGS;
1704 if (uNewDr3 != pVM->cpum.s.Hyper.dr[3])
1705 CPUMSetHyperDR3(pVM, uNewDr3);
1706 if (uNewDr2 != pVM->cpum.s.Hyper.dr[2])
1707 CPUMSetHyperDR2(pVM, uNewDr2);
1708 if (uNewDr1 != pVM->cpum.s.Hyper.dr[1])
1709 CPUMSetHyperDR1(pVM, uNewDr1);
1710 if (uNewDr0 != pVM->cpum.s.Hyper.dr[0])
1711 CPUMSetHyperDR0(pVM, uNewDr0);
1712 if (uNewDr7 != pVM->cpum.s.Hyper.dr[7])
1713 CPUMSetHyperDR7(pVM, uNewDr7);
1714 }
1715 else
1716 {
1717#ifdef IN_RC
1718 if (pCpumCpu->fUseFlags & CPUM_USE_DEBUG_REGS)
1719 {
1720            /** @todo restore host DRx registers. */
1721 }
1722#endif
1723 pCpumCpu->fUseFlags &= ~CPUM_USE_DEBUG_REGS;
1724 }
1725 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1726 pCpumCpu->fUseFlags, pVM->cpum.s.Hyper.dr[0], pVM->cpum.s.Hyper.dr[1],
1727 pVM->cpum.s.Hyper.dr[2], pVM->cpum.s.Hyper.dr[3], pVM->cpum.s.Hyper.dr[6],
1728 pVM->cpum.s.Hyper.dr[7]));
1729
1730 return VINF_SUCCESS;
1731}
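/* The per-slot priority applied above, condensed for breakpoint 0 (a DBGF
 * breakpoint beats a guest one, which beats the previous hyper value):
 *
 *     RTGCUINTREG uNewDr0 = (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0)) ? DBGFBpGetDR0(pVM)
 *                         : (uGstDr7  & (X86_DR7_L0 | X86_DR7_G0)) ? CPUMGetGuestDR0(pVM)
 *                         : pVM->cpum.s.Hyper.dr[0];
 */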
1732
1733#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
1734
1735/**
1736 * Transforms the guest CPU state to raw-ring mode.
1737 *
1738 * This function will change the cs and ss registers with DPL=0 to DPL=1.
1739 *
1740 * @returns VBox status. (recompiler failure)
1741 * @param pVM VM handle.
1742 * @param pCtxCore The context core (for trap usage).
1743 * @see @ref pg_raw
1744 */
1745VMMDECL(int) CPUMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
1746{
1747 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1748
1749 Assert(!pVM->cpum.s.fRawEntered);
1750 if (!pCtxCore)
1751 pCtxCore = CPUMCTX2CORE(&pCpumCpu->Guest);
1752
1753 /*
1754 * Are we in Ring-0?
1755 */
1756 if ( pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
1757 && !pCtxCore->eflags.Bits.u1VM)
1758 {
1759 /*
1760 * Enter execution mode.
1761 */
1762 PATMRawEnter(pVM, pCtxCore);
1763
1764 /*
1765 * Set CPL to Ring-1.
1766 */
1767 pCtxCore->ss |= 1;
1768 if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
1769 pCtxCore->cs |= 1;
1770 }
1771 else
1772 {
1773 AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
1774 ("ring-1 code not supported\n"));
1775 /*
1776 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
1777 */
1778 PATMRawEnter(pVM, pCtxCore);
1779 }
1780
1781 /*
1782 * Assert sanity.
1783 */
1784 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
1785 AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
1786 || pCtxCore->eflags.Bits.u1VM,
1787 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1788 Assert((pCpumCpu->Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
1789 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
1790
1791 pVM->cpum.s.fRawEntered = true;
1792 return VINF_SUCCESS;
1793}
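/* A worked example of the ring transformation above: a flat ring-0 selector
 * such as 0x0008 enters raw mode as 0x0009 (RPL raised to 1), and
 * CPUMRawLeave() masks the RPL off again:
 *
 *     RTSEL Sel = 0x0008;
 *     Sel |= 1;               // raw enter: ring-0 selector -> ring-1
 *     Sel &= ~X86_SEL_RPL;    // raw leave: back to ring-0
 */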
1794
1795
1796/**
1797 * Transforms the guest CPU state from raw-ring mode to correct values.
1798 *
1799 * This function will change any selector registers with DPL=1 to DPL=0.
1800 *
1801 * @returns Adjusted rc.
1802 * @param pVM VM handle.
1803 * @param rc Raw mode return code
1804 * @param pCtxCore The context core (for trap usage).
1805 * @see @ref pg_raw
1806 */
1807VMMDECL(int) CPUMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rc)
1808{
1809 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1810
1811 /*
1812 * Don't leave if we've already left (in GC).
1813 */
1814 Assert(pVM->cpum.s.fRawEntered);
1815 if (!pVM->cpum.s.fRawEntered)
1816 return rc;
1817 pVM->cpum.s.fRawEntered = false;
1818
1819 PCPUMCTX pCtx = &pCpumCpu->Guest;
1820 if (!pCtxCore)
1821 pCtxCore = CPUMCTX2CORE(pCtx);
1822 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
1823 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
1824 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
1825
1826 /*
1827 * Are we executing in raw ring-1?
1828 */
1829 if ( (pCtxCore->ss & X86_SEL_RPL) == 1
1830 && !pCtxCore->eflags.Bits.u1VM)
1831 {
1832 /*
1833 * Leave execution mode.
1834 */
1835 PATMRawLeave(pVM, pCtxCore, rc);
1836 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
1837 /** @todo See what happens if we remove this. */
1838 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1839 pCtxCore->ds &= ~X86_SEL_RPL;
1840 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1841 pCtxCore->es &= ~X86_SEL_RPL;
1842 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1843 pCtxCore->fs &= ~X86_SEL_RPL;
1844 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1845 pCtxCore->gs &= ~X86_SEL_RPL;
1846
1847 /*
1848 * Ring-1 selector => Ring-0.
1849 */
1850 pCtxCore->ss &= ~X86_SEL_RPL;
1851 if ((pCtxCore->cs & X86_SEL_RPL) == 1)
1852 pCtxCore->cs &= ~X86_SEL_RPL;
1853 }
1854 else
1855 {
1856 /*
1857 * PATM is taking care of the IOPL and IF flags for us.
1858 */
1859 PATMRawLeave(pVM, pCtxCore, rc);
1860 if (!pCtxCore->eflags.Bits.u1VM)
1861 {
1862 /** @todo See what happens if we remove this. */
1863 if ((pCtxCore->ds & X86_SEL_RPL) == 1)
1864 pCtxCore->ds &= ~X86_SEL_RPL;
1865 if ((pCtxCore->es & X86_SEL_RPL) == 1)
1866 pCtxCore->es &= ~X86_SEL_RPL;
1867 if ((pCtxCore->fs & X86_SEL_RPL) == 1)
1868 pCtxCore->fs &= ~X86_SEL_RPL;
1869 if ((pCtxCore->gs & X86_SEL_RPL) == 1)
1870 pCtxCore->gs &= ~X86_SEL_RPL;
1871 }
1872 }
1873
1874 return rc;
1875}
1876
1877/**
1878 * Updates the EFLAGS while we're in raw-mode.
1879 *
1880 * @param pVM The VM handle.
1881 * @param pCtxCore The context core.
1882 * @param eflags The new EFLAGS value.
1883 */
1884VMMDECL(void) CPUMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t eflags)
1885{
1886 if (!pVM->cpum.s.fRawEntered)
1887 {
1888 pCtxCore->eflags.u32 = eflags;
1889 return;
1890 }
1891 PATMRawSetEFlags(pVM, pCtxCore, eflags);
1892}
1893
1894#endif /* !IN_RING0 */
1895
1896/**
1897 * Gets the EFLAGS while we're in raw-mode.
1898 *
1899 * @returns The eflags.
1900 * @param pVM The VM handle.
1901 * @param pCtxCore The context core.
1902 */
1903VMMDECL(uint32_t) CPUMRawGetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore)
1904{
1905#ifdef IN_RING0
1906 return pCtxCore->eflags.u32;
1907#else
1908 if (!pVM->cpum.s.fRawEntered)
1909 return pCtxCore->eflags.u32;
1910 return PATMRawGetEFlags(pVM, pCtxCore);
1911#endif
1912}
1913
1914
1915/**
1916 * Gets and resets the changed flags (CPUM_CHANGED_*).
1917 * Only REM should call this function.
1918 *
1919 * @returns The changed flags.
1920 * @param pVM The VM handle.
1921 */
1922VMMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVM pVM)
1923{
1924 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1925
1926 unsigned fFlags = pCpumCpu->fChanged;
1927 pCpumCpu->fChanged = 0;
1928 /** @todo change the switcher to use the fChanged flags. */
1929 if (pCpumCpu->fUseFlags & CPUM_USED_FPU_SINCE_REM)
1930 {
1931 fFlags |= CPUM_CHANGED_FPU_REM;
1932 pCpumCpu->fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
1933 }
1934 return fFlags;
1935}
1936
1937
1938/**
1939 * Sets the specified changed flags (CPUM_CHANGED_*).
1940 *
1941 * @param pVM The VM handle.
1942 */
1943VMMDECL(void) CPUMSetChangedFlags(PVM pVM, uint32_t fChangedFlags)
1944{
1945 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1946
1947 pCpumCpu->fChanged |= fChangedFlags;
1948}
1949
1950
1951/**
1952 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
1953 * @returns true if supported.
1954 * @returns false if not supported.
1955 * @param pVM The VM handle.
1956 */
1957VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
1958{
1959 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
1960}
1961
1962
1963/**
1964 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1965 * @returns true if used.
1966 * @returns false if not used.
1967 * @param pVM The VM handle.
1968 */
1969VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1970{
1971 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1972
1973 return (pCpumCpu->fUseFlags & CPUM_USE_SYSENTER) != 0;
1974}
1975
1976
1977/**
1978 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1979 * @returns true if used.
1980 * @returns false if not used.
1981 * @param pVM The VM handle.
1982 */
1983VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1984{
1985 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
1986
1987 return (pCpumCpu->fUseFlags & CPUM_USE_SYSCALL) != 0;
1988}
1989
1990#ifndef IN_RING3
1991
1992/**
1993 * Lazily sync in the FPU/XMM state
1994 *
1995 * @returns VBox status code.
1996 * @param pVM VM handle.
1997 * @param pVCpu VMCPU handle
1998 */
1999VMMDECL(int) CPUMHandleLazyFPU(PVM pVM, PVMCPU pVCpu)
2000{
2001 return CPUMHandleLazyFPUAsm(&pVCpu->cpum.s);
2002}
2003
2004
2005/**
2006 * Restore host FPU/XMM state
2007 *
2008 * @returns VBox status code.
2009 * @param pVM VM handle.
2010 * @param pVCpu VMCPU handle
2011 */
2012VMMDECL(int) CPUMRestoreHostFPUState(PVM pVM, PVMCPU pVCpu)
2013{
2014 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
2015 return CPUMRestoreHostFPUStateAsm(&pVCpu->cpum.s);
2016}
2017
2018#endif /* !IN_RING3 */
2019
2020/**
2021 * Checks if we activated the FPU/XMM state of the guest OS
2022 * @returns true if we did.
2023 * @returns false if not.
2024 * @param pVCpu The VMCPU handle.
2025 */
2026VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2027{
2028 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2029}
2030
2031
2032/**
2033 * Deactivate the FPU/XMM state of the guest OS
2034 * @param pVM The VM handle.
2035 */
2036VMMDECL(void) CPUMDeactivateGuestFPUState(PVM pVM)
2037{
2038 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2039
2040 pCpumCpu->fUseFlags &= ~CPUM_USED_FPU;
2041}
2042
2043
2044/**
2045 * Checks if the guest debug state is active
2046 *
2047 * @returns boolean
2048 * @param pVM VM handle.
2049 */
2050VMMDECL(bool) CPUMIsGuestDebugStateActive(PVM pVM)
2051{
2052 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2053
2054 return (pCpumCpu->fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2055}
2056
2057
2058/**
2059 * Mark the guest's debug state as inactive
2060 *
2061 * @returns boolean
2062 * @param pVM VM handle.
2063 */
2064VMMDECL(void) CPUMDeactivateGuestDebugState(PVM pVM)
2065{
2066 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2067
2068 pCpumCpu->fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2069}
2070
2071
2072/**
2073 * Checks if the hidden selector registers are valid
2074 * @returns true if they are.
2075 * @returns false if not.
2076 * @param pVM The VM handle.
2077 */
2078VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
2079{
2080 return !!pVM->cpum.s.fValidHiddenSelRegs; /** @todo change fValidHiddenSelRegs to bool! */
2081}
2082
2083
2084/**
2085 * Sets whether the hidden selector registers are valid.
2086 * @param pVM The VM handle.
2087 * @param fValid Valid or not
2088 */
2089VMMDECL(void) CPUMSetHiddenSelRegsValid(PVM pVM, bool fValid)
2090{
2091 pVM->cpum.s.fValidHiddenSelRegs = fValid;
2092}
2093
2094
2095/**
2096 * Get the current privilege level of the guest.
2097 *
2098 * @returns cpl
2099 * @param pVM VM Handle.
2100 * @param pCtxCore The context core (trap register frame).
2101 */
2102VMMDECL(uint32_t) CPUMGetGuestCPL(PVM pVM, PCPUMCTXCORE pCtxCore)
2103{
2104 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2105 uint32_t cpl;
2106
2107 if (CPUMAreHiddenSelRegsValid(pVM))
2108 {
2109 /*
2110 * The hidden CS.DPL register is always equal to the CPL, it is
2111         * not affected by loading a conforming code segment.
2112 *
2113 * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
2114 * at SS. (ACP2 regression during install after a far call to ring 2)
2115 */
2116 if (RT_LIKELY(pCpumCpu->Guest.cr0 & X86_CR0_PE))
2117 cpl = pCtxCore->ssHid.Attr.n.u2Dpl;
2118 else
2119 cpl = 0; /* CPL set to 3 for VT-x real-mode emulation. */
2120 }
2121 else if (RT_LIKELY(pCpumCpu->Guest.cr0 & X86_CR0_PE))
2122 {
2123 if (RT_LIKELY(!pCtxCore->eflags.Bits.u1VM))
2124 {
2125 /*
2126 * The SS RPL is always equal to the CPL, while the CS RPL
2127 * isn't necessarily equal if the segment is conforming.
2128 * See section 4.11.1 in the AMD manual.
2129 */
2130 cpl = (pCtxCore->ss & X86_SEL_RPL);
2131#ifndef IN_RING0
2132 if (cpl == 1)
2133 cpl = 0;
2134#endif
2135 }
2136 else
2137 cpl = 3;
2138 }
2139 else
2140 cpl = 0; /* real mode; cpl is zero */
2141
2142 return cpl;
2143}
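/* A worked example of the fallback path above: with ss = 0x001b and no valid
 * hidden registers, cpl = 0x001b & X86_SEL_RPL = 3; a raw-mode ss with RPL 1
 * is reported as ring 0 outside ring-0 builds (the cpl == 1 special case).
 */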
2144
2145
2146/**
2147 * Gets the current guest CPU mode.
2148 *
2149 * If paging mode is what you need, check out PGMGetGuestMode().
2150 *
2151 * @returns The CPU mode.
2152 * @param pVM The VM handle.
2153 */
2154VMMDECL(CPUMMODE) CPUMGetGuestMode(PVM pVM)
2155{
2156 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);
2157
2158 CPUMMODE enmMode;
2159 if (!(pCpumCpu->Guest.cr0 & X86_CR0_PE))
2160 enmMode = CPUMMODE_REAL;
2161 else if (!(pCpumCpu->Guest.msrEFER & MSR_K6_EFER_LMA))
2162 enmMode = CPUMMODE_PROTECTED;
2163 else
2164 enmMode = CPUMMODE_LONG;
2165
2166 return enmMode;
2167}
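/* A usage sketch, dispatching on the reported mode (paging mode is a separate
 * question, see PGMGetGuestMode()):
 *
 *     switch (CPUMGetGuestMode(pVM))
 *     {
 *         case CPUMMODE_REAL:      Log(("real mode\n"));      break;
 *         case CPUMMODE_PROTECTED: Log(("protected mode\n")); break;
 *         case CPUMMODE_LONG:      Log(("long mode\n"));      break;
 *         default:                 AssertFailed();            break;
 *     }
 */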
2168