VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@45733

Last change on this file since 45733 was 45485, checked in by vboxsync on 2013-04-11:
  • *: Where possible, drop the #ifdef VBOX_WITH_RAW_RING1 when EMIsRawRing1Enabled is used.
  • SELM: Don't shadow TSS.esp1/ss1 unless ring-1 compression is enabled (also fixed a log statement there).
  • SELM: selmGuestToShadowDesc should not push ring-1 selectors into ring-2 unless EMIsRawRing1Enabled() holds true.
  • REM: Don't set CPU_INTERRUPT_EXTERNAL_EXIT in helper_ltr() for now.
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 78.4 KB
1/* $Id: CPUMAllRegs.cpp 45485 2013-04-11 14:46:04Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52
53/*******************************************************************************
54* Defined Constants And Macros *
55*******************************************************************************/
56/**
57 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
58 *
59 * @returns Pointer to the Virtual CPU.
60 * @param a_pGuestCtx Pointer to the guest context.
61 */
62#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
63
64/**
65 * Lazily loads the hidden parts of a selector register when using raw-mode.
66 */
67#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
68# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
69 do \
70 { \
71 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
72 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
73 } while (0)
74#else
75# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
76 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
77#endif
78
79
80
81#ifdef VBOX_WITH_RAW_MODE_NOT_R0
82
83/**
84 * Does the lazy hidden selector register loading.
85 *
86 * @param pVCpu The current Virtual CPU.
87 * @param pSReg The selector register to lazily load hidden parts of.
88 */
89static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
90{
91 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
92 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
93 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
94
95 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
96 {
97 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
98 pSReg->Attr.u = 0;
99 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
100 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
101 pSReg->Attr.n.u2Dpl = 3;
102 pSReg->Attr.n.u1Present = 1;
103 pSReg->u32Limit = 0x0000ffff;
104 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
105 pSReg->ValidSel = pSReg->Sel;
106 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
107 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
108 }
109 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
110 {
111 /* Real mode - leave the limit and flags alone here, at least for now. */
112 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
113 pSReg->ValidSel = pSReg->Sel;
114 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
115 }
116 else
117 {
118 /* Protected mode - get it from the selector descriptor tables. */
119 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
120 {
121 Assert(!CPUMIsGuestInLongMode(pVCpu));
122 pSReg->Sel = 0;
123 pSReg->u64Base = 0;
124 pSReg->u32Limit = 0;
125 pSReg->Attr.u = 0;
126 pSReg->ValidSel = 0;
127 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
128 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
129 }
130 else
131 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
132 }
133}
134
135
136/**
137 * Makes sure the hidden CS and SS selector registers are valid, loading them if
138 * necessary.
139 *
140 * @param pVCpu The current virtual CPU.
141 */
142VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
143{
144 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
145 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
146}
147
148
149/**
 150 * Loads the hidden parts of a selector register.
 151 * @param pVCpu The current virtual CPU.
 152 * @param pSReg The selector register to load the hidden parts of.
 153 */
154VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
155{
156 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
157}
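
/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * It shows how raw-mode code would make sure the hidden parts of a selector
 * register are valid before using its base/limit.  The helper name
 * cpumExampleGetSsBase is hypothetical; the block is compiled out with #if 0
 * so the listing stays functionally unchanged.
 */
#if 0
static uint64_t cpumExampleGetSsBase(PVMCPU pVCpu)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtx->ss); /* raw-mode only */
    return pCtx->ss.u64Base;
}
#endif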
158
159#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
160
161
162/**
163 * Obsolete.
164 *
165 * We don't support nested hypervisor context interrupts or traps. Life is much
166 * simpler when we don't. It's also slightly faster at times.
167 *
 168 * @param pVCpu Pointer to the VMCPU.
169 */
170VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
171{
172 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
173}
174
175
176/**
177 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
178 *
179 * @param pVCpu Pointer to the VMCPU.
180 */
181VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
182{
183 return &pVCpu->cpum.s.Hyper;
184}
185
186
187VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
188{
189 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
190 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
191}
192
193
194VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
195{
196 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
197 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
198}
199
200
201VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
202{
203 pVCpu->cpum.s.Hyper.cr3 = cr3;
204
205#ifdef IN_RC
206 /* Update the current CR3. */
207 ASMSetCR3(cr3);
208#endif
209}
210
211VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
212{
213 return pVCpu->cpum.s.Hyper.cr3;
214}
215
216
217VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
218{
219 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
220}
221
222
223VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
224{
225 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
226}
227
228
229VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
230{
231 pVCpu->cpum.s.Hyper.es.Sel = SelES;
232}
233
234
235VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
236{
237 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
238}
239
240
241VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
242{
243 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
244}
245
246
247VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
248{
249 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
250}
251
252
253VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
254{
255 pVCpu->cpum.s.Hyper.esp = u32ESP;
256}
257
258
259VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
260{
261 pVCpu->cpum.s.Hyper.edx = u32EDX;
262}
263
264
265VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
266{
267 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
268 return VINF_SUCCESS;
269}
270
271
272VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
273{
274 pVCpu->cpum.s.Hyper.eip = u32EIP;
275}
276
277
278/**
279 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
280 * EFLAGS and EIP prior to resuming guest execution.
281 *
 282 * All general registers not given as parameters will be set to 0. The EFLAGS
283 * register will be set to sane values for C/C++ code execution with interrupts
284 * disabled and IOPL 0.
285 *
286 * @param pVCpu The current virtual CPU.
287 * @param u32EIP The EIP value.
288 * @param u32ESP The ESP value.
289 * @param u32EAX The EAX value.
290 * @param u32EDX The EDX value.
291 */
292VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
293{
294 pVCpu->cpum.s.Hyper.eip = u32EIP;
295 pVCpu->cpum.s.Hyper.esp = u32ESP;
296 pVCpu->cpum.s.Hyper.eax = u32EAX;
297 pVCpu->cpum.s.Hyper.edx = u32EDX;
298 pVCpu->cpum.s.Hyper.ecx = 0;
299 pVCpu->cpum.s.Hyper.ebx = 0;
300 pVCpu->cpum.s.Hyper.ebp = 0;
301 pVCpu->cpum.s.Hyper.esi = 0;
302 pVCpu->cpum.s.Hyper.edi = 0;
303 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
304}
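
/*
 * Editor's note: illustrative sketch, not part of the original file.  It shows
 * how a raw-mode caller along the lines of VMMR3RawRunGC might use
 * CPUMSetHyperState before resuming guest execution.  The helper name and the
 * entry/stack parameters are placeholders; the block is compiled out with #if 0.
 */
#if 0
static void cpumExampleResetHyperState(PVMCPU pVCpu, uint32_t uEntryEip, uint32_t uStackEsp)
{
    /* ECX, EBX, EBP, ESI and EDI are zeroed; EFLAGS ends up as just the
       reserved bit 1, i.e. interrupts disabled and IOPL 0. */
    CPUMSetHyperState(pVCpu, uEntryEip, uStackEsp, 0 /*EAX*/, 0 /*EDX*/);
}
#endif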
305
306
307VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
308{
309 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
310}
311
312
313VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
314{
315 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
316}
317
318
319VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
320{
321 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
322 /** @todo in GC we must load it! */
323}
324
325
326VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
327{
328 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
329 /** @todo in GC we must load it! */
330}
331
332
333VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
334{
335 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
336 /** @todo in GC we must load it! */
337}
338
339
340VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
341{
342 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
343 /** @todo in GC we must load it! */
344}
345
346
347VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
348{
349 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
350 /** @todo in GC we must load it! */
351}
352
353
354VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
355{
356 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
357 /** @todo in GC we must load it! */
358}
359
360
361VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
362{
363 return pVCpu->cpum.s.Hyper.cs.Sel;
364}
365
366
367VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
368{
369 return pVCpu->cpum.s.Hyper.ds.Sel;
370}
371
372
373VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
374{
375 return pVCpu->cpum.s.Hyper.es.Sel;
376}
377
378
379VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
380{
381 return pVCpu->cpum.s.Hyper.fs.Sel;
382}
383
384
385VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
386{
387 return pVCpu->cpum.s.Hyper.gs.Sel;
388}
389
390
391VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
392{
393 return pVCpu->cpum.s.Hyper.ss.Sel;
394}
395
396
397VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
398{
399 return pVCpu->cpum.s.Hyper.eax;
400}
401
402
403VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
404{
405 return pVCpu->cpum.s.Hyper.ebx;
406}
407
408
409VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
410{
411 return pVCpu->cpum.s.Hyper.ecx;
412}
413
414
415VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
416{
417 return pVCpu->cpum.s.Hyper.edx;
418}
419
420
421VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
422{
423 return pVCpu->cpum.s.Hyper.esi;
424}
425
426
427VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
428{
429 return pVCpu->cpum.s.Hyper.edi;
430}
431
432
433VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
434{
435 return pVCpu->cpum.s.Hyper.ebp;
436}
437
438
439VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
440{
441 return pVCpu->cpum.s.Hyper.esp;
442}
443
444
445VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
446{
447 return pVCpu->cpum.s.Hyper.eflags.u32;
448}
449
450
451VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
452{
453 return pVCpu->cpum.s.Hyper.eip;
454}
455
456
457VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
458{
459 return pVCpu->cpum.s.Hyper.rip;
460}
461
462
463VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
464{
465 if (pcbLimit)
466 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
467 return pVCpu->cpum.s.Hyper.idtr.pIdt;
468}
469
470
471VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
472{
473 if (pcbLimit)
474 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
475 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
476}
477
478
479VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
480{
481 return pVCpu->cpum.s.Hyper.ldtr.Sel;
482}
483
484
485VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
486{
487 return pVCpu->cpum.s.Hyper.dr[0];
488}
489
490
491VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
492{
493 return pVCpu->cpum.s.Hyper.dr[1];
494}
495
496
497VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
498{
499 return pVCpu->cpum.s.Hyper.dr[2];
500}
501
502
503VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
504{
505 return pVCpu->cpum.s.Hyper.dr[3];
506}
507
508
509VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
510{
511 return pVCpu->cpum.s.Hyper.dr[6];
512}
513
514
515VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
516{
517 return pVCpu->cpum.s.Hyper.dr[7];
518}
519
520
521/**
522 * Gets the pointer to the internal CPUMCTXCORE structure.
523 * This is only for reading in order to save a few calls.
524 *
525 * @param pVCpu Handle to the virtual cpu.
526 */
527VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
528{
529 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
530}
531
532
533/**
534 * Queries the pointer to the internal CPUMCTX structure.
535 *
536 * @returns The CPUMCTX pointer.
537 * @param pVCpu Handle to the virtual cpu.
538 */
539VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
540{
541 return &pVCpu->cpum.s.Guest;
542}
543
544VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
545{
546#ifdef VBOX_WITH_IEM
547# ifdef VBOX_WITH_RAW_MODE_NOT_R0
548 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
549 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
550# endif
551#endif
552 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
553 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
554 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
555 return VINF_SUCCESS; /* formality, consider it void. */
556}
557
558VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
559{
560#ifdef VBOX_WITH_IEM
561# ifdef VBOX_WITH_RAW_MODE_NOT_R0
562 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
563 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
564# endif
565#endif
566 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
567 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
568 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
569 return VINF_SUCCESS; /* formality, consider it void. */
570}
571
572VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
573{
574#ifdef VBOX_WITH_IEM
575# ifdef VBOX_WITH_RAW_MODE_NOT_R0
576 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
577 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
578# endif
579#endif
580 pVCpu->cpum.s.Guest.tr.Sel = tr;
581 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
582 return VINF_SUCCESS; /* formality, consider it void. */
583}
584
585VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
586{
587#ifdef VBOX_WITH_IEM
588# ifdef VBOX_WITH_RAW_MODE_NOT_R0
589 if ( ( ldtr != 0
590 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
591 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
592 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
593# endif
594#endif
595 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
596 /* The caller will set more hidden bits if it has them. */
597 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
598 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
599 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
600 return VINF_SUCCESS; /* formality, consider it void. */
601}
602
603
604/**
605 * Set the guest CR0.
606 *
607 * When called in GC, the hyper CR0 may be updated if that is
608 * required. The caller only has to take special action if AM,
609 * WP, PG or PE changes.
610 *
611 * @returns VINF_SUCCESS (consider it void).
612 * @param pVCpu Handle to the virtual cpu.
613 * @param cr0 The new CR0 value.
614 */
615VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
616{
617#ifdef IN_RC
618 /*
619 * Check if we need to change hypervisor CR0 because
620 * of math stuff.
621 */
622 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
623 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
624 {
625 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
626 {
627 /*
 628 * We haven't saved the host FPU state yet, so TS and MP are both set
629 * and EM should be reflecting the guest EM (it always does this).
630 */
631 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
632 {
633 uint32_t HyperCR0 = ASMGetCR0();
634 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
635 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
636 HyperCR0 &= ~X86_CR0_EM;
637 HyperCR0 |= cr0 & X86_CR0_EM;
638 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
639 ASMSetCR0(HyperCR0);
640 }
641# ifdef VBOX_STRICT
642 else
643 {
644 uint32_t HyperCR0 = ASMGetCR0();
645 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
646 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
647 }
648# endif
649 }
650 else
651 {
652 /*
653 * Already saved the state, so we're just mirroring
654 * the guest flags.
655 */
656 uint32_t HyperCR0 = ASMGetCR0();
657 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
658 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
659 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
660 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
661 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
662 Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
663 ASMSetCR0(HyperCR0);
664 }
665 }
666#endif /* IN_RC */
667
668 /*
669 * Check for changes causing TLB flushes (for REM).
670 * The caller is responsible for calling PGM when appropriate.
671 */
672 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
673 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
674 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
675 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
676
677 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
678 return VINF_SUCCESS;
679}
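
/*
 * Editor's note: illustrative sketch, not part of the original file.  The doc
 * comment above says the caller must react to AM/WP/PG/PE changes itself; a
 * hypothetical caller could detect such transitions like this and then resync
 * paging (e.g. via PGM).  The helper name and the follow-up action are
 * assumptions; the block is compiled out with #if 0.
 */
#if 0
static void cpumExampleWriteCr0(PVMCPU pVCpu, uint64_t uNewCr0)
{
    uint64_t const uOldCr0 = CPUMGetGuestCR0(pVCpu);
    CPUMSetGuestCR0(pVCpu, uNewCr0);
    if ((uOldCr0 ^ uNewCr0) & (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP))
    {
        /* Caller-specific: notify PGM / flush TLBs here. */
    }
}
#endif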
680
681
682VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
683{
684 pVCpu->cpum.s.Guest.cr2 = cr2;
685 return VINF_SUCCESS;
686}
687
688
689VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
690{
691 pVCpu->cpum.s.Guest.cr3 = cr3;
692 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
693 return VINF_SUCCESS;
694}
695
696
697VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
698{
699 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
700 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
701 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
702 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
703 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
704 cr4 &= ~X86_CR4_OSFSXR;
705 pVCpu->cpum.s.Guest.cr4 = cr4;
706 return VINF_SUCCESS;
707}
708
709
710VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
711{
712 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
713 return VINF_SUCCESS;
714}
715
716
717VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
718{
719 pVCpu->cpum.s.Guest.eip = eip;
720 return VINF_SUCCESS;
721}
722
723
724VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
725{
726 pVCpu->cpum.s.Guest.eax = eax;
727 return VINF_SUCCESS;
728}
729
730
731VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
732{
733 pVCpu->cpum.s.Guest.ebx = ebx;
734 return VINF_SUCCESS;
735}
736
737
738VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
739{
740 pVCpu->cpum.s.Guest.ecx = ecx;
741 return VINF_SUCCESS;
742}
743
744
745VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
746{
747 pVCpu->cpum.s.Guest.edx = edx;
748 return VINF_SUCCESS;
749}
750
751
752VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
753{
754 pVCpu->cpum.s.Guest.esp = esp;
755 return VINF_SUCCESS;
756}
757
758
759VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
760{
761 pVCpu->cpum.s.Guest.ebp = ebp;
762 return VINF_SUCCESS;
763}
764
765
766VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
767{
768 pVCpu->cpum.s.Guest.esi = esi;
769 return VINF_SUCCESS;
770}
771
772
773VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
774{
775 pVCpu->cpum.s.Guest.edi = edi;
776 return VINF_SUCCESS;
777}
778
779
780VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
781{
782 pVCpu->cpum.s.Guest.ss.Sel = ss;
783 return VINF_SUCCESS;
784}
785
786
787VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
788{
789 pVCpu->cpum.s.Guest.cs.Sel = cs;
790 return VINF_SUCCESS;
791}
792
793
794VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
795{
796 pVCpu->cpum.s.Guest.ds.Sel = ds;
797 return VINF_SUCCESS;
798}
799
800
801VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
802{
803 pVCpu->cpum.s.Guest.es.Sel = es;
804 return VINF_SUCCESS;
805}
806
807
808VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
809{
810 pVCpu->cpum.s.Guest.fs.Sel = fs;
811 return VINF_SUCCESS;
812}
813
814
815VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
816{
817 pVCpu->cpum.s.Guest.gs.Sel = gs;
818 return VINF_SUCCESS;
819}
820
821
822VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
823{
824 pVCpu->cpum.s.Guest.msrEFER = val;
825}
826
827
828/**
829 * Query an MSR.
830 *
831 * The caller is responsible for checking privilege if the call is the result
832 * of a RDMSR instruction. We'll do the rest.
833 *
834 * @retval VINF_SUCCESS on success.
835 * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
836 * expected to take the appropriate actions. @a *puValue is set to 0.
837 * @param pVCpu Pointer to the VMCPU.
838 * @param idMsr The MSR.
839 * @param puValue Where to return the value.
840 *
841 * @remarks This will always return the right values, even when we're in the
842 * recompiler.
843 */
844VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
845{
846 /*
847 * If we don't indicate MSR support in the CPUID feature bits, indicate
848 * that a #GP(0) should be raised.
849 */
850 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
851 {
852 *puValue = 0;
853 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
854 }
855
856 int rc = VINF_SUCCESS;
857 uint8_t const u8Multiplier = 4;
858 switch (idMsr)
859 {
860 case MSR_IA32_TSC:
861 *puValue = TMCpuTickGet(pVCpu);
862 break;
863
864 case MSR_IA32_APICBASE:
865 {
866 PVM pVM = pVCpu->CTX_SUFF(pVM);
867 if ( ( pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1 /* APIC Std feature */
868 && (pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_APIC))
869 || ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 /* APIC Ext feature (AMD) */
870 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD
871 && (pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_APIC))
872 || ( pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1 /* x2APIC */
873 && (pVM->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_X2APIC)))
874 {
875 *puValue = pVCpu->cpum.s.Guest.msrApicBase;
876 }
877 else
878 {
879 *puValue = 0;
880 rc = VERR_CPUM_RAISE_GP_0;
881 }
882 break;
883 }
884
885 case MSR_IA32_CR_PAT:
886 *puValue = pVCpu->cpum.s.Guest.msrPAT;
887 break;
888
889 case MSR_IA32_SYSENTER_CS:
890 *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
891 break;
892
893 case MSR_IA32_SYSENTER_EIP:
894 *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
895 break;
896
897 case MSR_IA32_SYSENTER_ESP:
898 *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
899 break;
900
901 case MSR_IA32_MTRR_CAP:
902 {
903 /* This is currently a bit weird. :-) */
904 uint8_t const cVariableRangeRegs = 0;
905 bool const fSystemManagementRangeRegisters = false;
906 bool const fFixedRangeRegisters = false;
907 bool const fWriteCombiningType = false;
908 *puValue = cVariableRangeRegs
909 | (fFixedRangeRegisters ? RT_BIT_64(8) : 0)
910 | (fWriteCombiningType ? RT_BIT_64(10) : 0)
911 | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
912 break;
913 }
914
915 case MSR_IA32_MTRR_DEF_TYPE:
916 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
917 break;
918
919 case IA32_MTRR_FIX64K_00000:
920 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000;
921 break;
922 case IA32_MTRR_FIX16K_80000:
923 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000;
924 break;
925 case IA32_MTRR_FIX16K_A0000:
926 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000;
927 break;
928 case IA32_MTRR_FIX4K_C0000:
929 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000;
930 break;
931 case IA32_MTRR_FIX4K_C8000:
932 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000;
933 break;
934 case IA32_MTRR_FIX4K_D0000:
935 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000;
936 break;
937 case IA32_MTRR_FIX4K_D8000:
938 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000;
939 break;
940 case IA32_MTRR_FIX4K_E0000:
941 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000;
942 break;
943 case IA32_MTRR_FIX4K_E8000:
944 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000;
945 break;
946 case IA32_MTRR_FIX4K_F0000:
947 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000;
948 break;
949 case IA32_MTRR_FIX4K_F8000:
950 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000;
951 break;
952
953 case MSR_K6_EFER:
954 *puValue = pVCpu->cpum.s.Guest.msrEFER;
955 break;
956
957 case MSR_K8_SF_MASK:
958 *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
959 break;
960
961 case MSR_K6_STAR:
962 *puValue = pVCpu->cpum.s.Guest.msrSTAR;
963 break;
964
965 case MSR_K8_LSTAR:
966 *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
967 break;
968
969 case MSR_K8_CSTAR:
970 *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
971 break;
972
973 case MSR_K8_FS_BASE:
974 *puValue = pVCpu->cpum.s.Guest.fs.u64Base;
975 break;
976
977 case MSR_K8_GS_BASE:
978 *puValue = pVCpu->cpum.s.Guest.gs.u64Base;
979 break;
980
981 case MSR_K8_KERNEL_GS_BASE:
982 *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
983 break;
984
985 case MSR_K8_TSC_AUX:
986 *puValue = pVCpu->cpum.s.GuestMsrs.msr.TscAux;
987 break;
988
989 case MSR_IA32_PERF_STATUS:
 990 /** @todo This may not be exactly correct; maybe use the host's values. */
991 *puValue = UINT64_C(1000) /* TSC increment by tick */
992 | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
993 | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
994 break;
995
996 case MSR_IA32_FSB_CLOCK_STS:
997 /*
998 * Encoded as:
 999 * 0 - 266 MHz
 1000 * 1 - 133 MHz
 1001 * 2 - 200 MHz
 1002 * 3 - 166 MHz
 1003 * 5 - 100 MHz
1004 */
1005 *puValue = (2 << 4);
1006 break;
1007
1008 case MSR_IA32_PLATFORM_INFO:
1009 *puValue = (u8Multiplier << 8) /* Flex ratio max */
1010 | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
1011 break;
1012
1013 case MSR_IA32_THERM_STATUS:
 1014 /* CPU temperature relative to TCC; to actually activate this, CPUID leaf 6 EAX[0] must be set. */
1015 *puValue = RT_BIT(31) /* validity bit */
1016 | (UINT64_C(20) << 16) /* degrees till TCC */;
1017 break;
1018
1019 case MSR_IA32_MISC_ENABLE:
1020#if 0
1021 /* Needs to be tested more before enabling. */
1022 *puValue = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
1023#else
 1024 /* Currently we don't allow guests to modify enable MSRs. */
 1025 *puValue = MSR_IA32_MISC_ENABLE_FAST_STRINGS /* by default */;
 1026
 1027 if ((pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR) != 0)
 1028 *puValue |= MSR_IA32_MISC_ENABLE_MONITOR /* if mwait/monitor available */;
1030 /** @todo: add more cpuid-controlled features this way. */
1031#endif
1032 break;
1033
1034#if 0 /*def IN_RING0 */
1035 case MSR_IA32_PLATFORM_ID:
1036 case MSR_IA32_BIOS_SIGN_ID:
1037 if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
1038 {
1039 /* Available since the P6 family. VT-x implies that this feature is present. */
1040 if (idMsr == MSR_IA32_PLATFORM_ID)
1041 *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
1042 else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
1043 *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
1044 break;
1045 }
1046 /* no break */
1047#endif
1048
1049 /*
 1050 * Intel-specific MSRs:
1051 */
1052 case MSR_IA32_PLATFORM_ID: /* fam/mod >= 6_01 */
1053 case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */
1054 /*case MSR_IA32_BIOS_UPDT_TRIG: - write-only? */
1055 case MSR_IA32_MCP_CAP: /* fam/mod >= 6_01 */
1056 /*case MSR_IA32_MCP_STATUS: - indicated as not present in CAP */
1057 /*case MSR_IA32_MCP_CTRL: - indicated as not present in CAP */
1058 case MSR_IA32_MC0_CTL:
1059 case MSR_IA32_MC0_STATUS:
1060 *puValue = 0;
1061 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
1062 {
1063 Log(("MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1064 rc = VERR_CPUM_RAISE_GP_0;
1065 }
1066 break;
1067
1068 default:
1069 /*
1070 * Hand the X2APIC range to PDM and the APIC.
1071 */
1072 if ( idMsr >= MSR_IA32_X2APIC_START
1073 && idMsr <= MSR_IA32_X2APIC_END)
1074 {
1075 rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
1076 if (RT_SUCCESS(rc))
1077 rc = VINF_SUCCESS;
1078 else
1079 {
1080 *puValue = 0;
1081 rc = VERR_CPUM_RAISE_GP_0;
1082 }
1083 }
1084 else
1085 {
1086 *puValue = 0;
1087 rc = VERR_CPUM_RAISE_GP_0;
1088 }
1089 break;
1090 }
1091
1092 return rc;
1093}
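
/*
 * Editor's note: illustrative sketch, not part of the original file.  A
 * RDMSR-style caller is expected to have done the privilege check already and
 * to turn VERR_CPUM_RAISE_GP_0 into a guest #GP(0) itself.  The helper name
 * is hypothetical and the block is compiled out with #if 0.
 */
#if 0
static bool cpumExampleReadTscAux(PVMCPU pVCpu, uint64_t *puValue)
{
    int rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, puValue);
    if (rc == VERR_CPUM_RAISE_GP_0)
        return false; /* the caller raises #GP(0) in the guest */
    return true;
}
#endif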
1094
1095
1096/**
1097 * Sets the MSR.
1098 *
1099 * The caller is responsible for checking privilege if the call is the result
1100 * of a WRMSR instruction. We'll do the rest.
1101 *
1102 * @retval VINF_SUCCESS on success.
1103 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
1104 * appropriate actions.
1105 *
1106 * @param pVCpu Pointer to the VMCPU.
1107 * @param idMsr The MSR id.
1108 * @param uValue The value to set.
1109 *
1110 * @remarks Everyone changing MSR values, including the recompiler, shall do it
1111 * by calling this method. This makes sure we have current values and
1112 * that we trigger all the right actions when something changes.
1113 */
1114VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
1115{
1116 /*
1117 * If we don't indicate MSR support in the CPUID feature bits, indicate
1118 * that a #GP(0) should be raised.
1119 */
1120 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
1121 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
1122
1123 int rc = VINF_SUCCESS;
1124 switch (idMsr)
1125 {
1126 case MSR_IA32_MISC_ENABLE:
1127 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = uValue;
1128 break;
1129
1130 case MSR_IA32_TSC:
1131 TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
1132 break;
1133
1134 case MSR_IA32_APICBASE:
1135 rc = PDMApicSetBase(pVCpu, uValue);
1136 if (rc != VINF_SUCCESS)
1137 rc = VERR_CPUM_RAISE_GP_0;
1138 break;
1139
1140 case MSR_IA32_CR_PAT:
1141 pVCpu->cpum.s.Guest.msrPAT = uValue;
1142 break;
1143
1144 case MSR_IA32_SYSENTER_CS:
1145 pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16 bits selector */
1146 break;
1147
1148 case MSR_IA32_SYSENTER_EIP:
1149 pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
1150 break;
1151
1152 case MSR_IA32_SYSENTER_ESP:
1153 pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
1154 break;
1155
1156 case MSR_IA32_MTRR_CAP:
1157 return VERR_CPUM_RAISE_GP_0;
1158
1159 case MSR_IA32_MTRR_DEF_TYPE:
1160 if ( (uValue & UINT64_C(0xfffffffffffff300))
1161 || ( (uValue & 0xff) != 0
1162 && (uValue & 0xff) != 1
1163 && (uValue & 0xff) != 4
1164 && (uValue & 0xff) != 5
1165 && (uValue & 0xff) != 6) )
1166 {
1167 Log(("MSR_IA32_MTRR_DEF_TYPE: #GP(0) - writing reserved value (%#llx)\n", uValue));
1168 return VERR_CPUM_RAISE_GP_0;
1169 }
1170 pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue;
1171 break;
1172
1173 case IA32_MTRR_FIX64K_00000:
1174 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000 = uValue;
1175 break;
1176 case IA32_MTRR_FIX16K_80000:
1177 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000 = uValue;
1178 break;
1179 case IA32_MTRR_FIX16K_A0000:
1180 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000 = uValue;
1181 break;
1182 case IA32_MTRR_FIX4K_C0000:
1183 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000 = uValue;
1184 break;
1185 case IA32_MTRR_FIX4K_C8000:
1186 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000 = uValue;
1187 break;
1188 case IA32_MTRR_FIX4K_D0000:
1189 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000 = uValue;
1190 break;
1191 case IA32_MTRR_FIX4K_D8000:
1192 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000 = uValue;
1193 break;
1194 case IA32_MTRR_FIX4K_E0000:
1195 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000 = uValue;
1196 break;
1197 case IA32_MTRR_FIX4K_E8000:
1198 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000 = uValue;
1199 break;
1200 case IA32_MTRR_FIX4K_F0000:
1201 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000 = uValue;
1202 break;
1203 case IA32_MTRR_FIX4K_F8000:
1204 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000 = uValue;
1205 break;
1206
1207 /*
1208 * AMD64 MSRs.
1209 */
1210 case MSR_K6_EFER:
1211 {
1212 PVM pVM = pVCpu->CTX_SUFF(pVM);
1213 uint64_t const uOldEFER = pVCpu->cpum.s.Guest.msrEFER;
1214 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1215 ? pVM->cpum.s.aGuestCpuIdExt[1].edx
1216 : 0;
1217 uint64_t fMask = 0;
1218
 1219 /* Work out which bits the guest is allowed to change (e.g. LMA is read-only). */
1220 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)
1221 fMask |= MSR_K6_EFER_NXE;
1222 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
1223 fMask |= MSR_K6_EFER_LME;
1224 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
1225 fMask |= MSR_K6_EFER_SCE;
1226 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
1227 fMask |= MSR_K6_EFER_FFXSR;
1228
1229 /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
1230 paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
1231 if ( (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
1232 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
1233 {
1234 Log(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
1235 return VERR_CPUM_RAISE_GP_0;
1236 }
1237
1238 /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
1239 AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
1240 ("Unexpected value %RX64\n", uValue));
1241 pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);
1242
1243 /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
1244 if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
1245 if ( (uOldEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
1246 != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
1247 {
1248 /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
1249 HMFlushTLB(pVCpu);
1250
1251 /* Notify PGM about NXE changes. */
1252 if ( (uOldEFER & MSR_K6_EFER_NXE)
1253 != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
1254 PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE));
1255 }
1256 break;
1257 }
1258
1259 case MSR_K8_SF_MASK:
1260 pVCpu->cpum.s.Guest.msrSFMASK = uValue;
1261 break;
1262
1263 case MSR_K6_STAR:
1264 pVCpu->cpum.s.Guest.msrSTAR = uValue;
1265 break;
1266
1267 case MSR_K8_LSTAR:
1268 pVCpu->cpum.s.Guest.msrLSTAR = uValue;
1269 break;
1270
1271 case MSR_K8_CSTAR:
1272 pVCpu->cpum.s.Guest.msrCSTAR = uValue;
1273 break;
1274
1275 case MSR_K8_FS_BASE:
1276 pVCpu->cpum.s.Guest.fs.u64Base = uValue;
1277 break;
1278
1279 case MSR_K8_GS_BASE:
1280 pVCpu->cpum.s.Guest.gs.u64Base = uValue;
1281 break;
1282
1283 case MSR_K8_KERNEL_GS_BASE:
1284 pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
1285 break;
1286
1287 case MSR_K8_TSC_AUX:
1288 pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
1289 break;
1290
1291 /*
 1292 * Intel-specific MSRs:
1293 */
1294 /*case MSR_IA32_PLATFORM_ID: - read-only */
1295 case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */
1296 case MSR_IA32_BIOS_UPDT_TRIG: /* fam/mod >= 6_01 */
1297 /*case MSR_IA32_MCP_CAP: - read-only */
1298 /*case MSR_IA32_MCP_STATUS: - read-only */
1299 /*case MSR_IA32_MCP_CTRL: - indicated as not present in CAP */
1300 /*case MSR_IA32_MC0_CTL: - read-only? */
1301 /*case MSR_IA32_MC0_STATUS: - read-only? */
1302 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
1303 {
1304 Log(("MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1305 return VERR_CPUM_RAISE_GP_0;
1306 }
1307 /* ignored */
1308 break;
1309
1310 default:
1311 /*
1312 * Hand the X2APIC range to PDM and the APIC.
1313 */
1314 if ( idMsr >= MSR_IA32_X2APIC_START
1315 && idMsr <= MSR_IA32_X2APIC_END)
1316 {
1317 rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
1318 if (rc != VINF_SUCCESS)
1319 rc = VERR_CPUM_RAISE_GP_0;
1320 }
1321 else
1322 {
1323 /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
1324 /** @todo rc = VERR_CPUM_RAISE_GP_0 */
1325 Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
1326 }
1327 break;
1328 }
1329 return rc;
1330}
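
/*
 * Editor's note: illustrative sketch, not part of the original file.  The
 * WRMSR counterpart of the read example above: the CPL-0 privilege check is
 * the caller's job, only the VERR_CPUM_RAISE_GP_0 status needs handling here.
 * Hypothetical helper, compiled out with #if 0.
 */
#if 0
static bool cpumExampleWriteEfer(PVMCPU pVCpu, uint64_t uValue)
{
    if (CPUMSetGuestMsr(pVCpu, MSR_K6_EFER, uValue) == VERR_CPUM_RAISE_GP_0)
        return false; /* e.g. an illegal LME change while paging is enabled */
    return true;
}
#endif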
1331
1332
1333VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
1334{
1335 if (pcbLimit)
1336 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
1337 return pVCpu->cpum.s.Guest.idtr.pIdt;
1338}
1339
1340
1341VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
1342{
1343 if (pHidden)
1344 *pHidden = pVCpu->cpum.s.Guest.tr;
1345 return pVCpu->cpum.s.Guest.tr.Sel;
1346}
1347
1348
1349VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
1350{
1351 return pVCpu->cpum.s.Guest.cs.Sel;
1352}
1353
1354
1355VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
1356{
1357 return pVCpu->cpum.s.Guest.ds.Sel;
1358}
1359
1360
1361VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
1362{
1363 return pVCpu->cpum.s.Guest.es.Sel;
1364}
1365
1366
1367VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
1368{
1369 return pVCpu->cpum.s.Guest.fs.Sel;
1370}
1371
1372
1373VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
1374{
1375 return pVCpu->cpum.s.Guest.gs.Sel;
1376}
1377
1378
1379VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
1380{
1381 return pVCpu->cpum.s.Guest.ss.Sel;
1382}
1383
1384
1385VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
1386{
1387 return pVCpu->cpum.s.Guest.ldtr.Sel;
1388}
1389
1390
1391VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
1392{
1393 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
1394 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
1395 return pVCpu->cpum.s.Guest.ldtr.Sel;
1396}
1397
1398
1399VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
1400{
1401 return pVCpu->cpum.s.Guest.cr0;
1402}
1403
1404
1405VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
1406{
1407 return pVCpu->cpum.s.Guest.cr2;
1408}
1409
1410
1411VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
1412{
1413 return pVCpu->cpum.s.Guest.cr3;
1414}
1415
1416
1417VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
1418{
1419 return pVCpu->cpum.s.Guest.cr4;
1420}
1421
1422
1423VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
1424{
1425 uint64_t u64;
1426 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1427 if (RT_FAILURE(rc))
1428 u64 = 0;
1429 return u64;
1430}
1431
1432
1433VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
1434{
1435 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1436}
1437
1438
1439VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
1440{
1441 return pVCpu->cpum.s.Guest.eip;
1442}
1443
1444
1445VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1446{
1447 return pVCpu->cpum.s.Guest.rip;
1448}
1449
1450
1451VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1452{
1453 return pVCpu->cpum.s.Guest.eax;
1454}
1455
1456
1457VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1458{
1459 return pVCpu->cpum.s.Guest.ebx;
1460}
1461
1462
1463VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1464{
1465 return pVCpu->cpum.s.Guest.ecx;
1466}
1467
1468
1469VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1470{
1471 return pVCpu->cpum.s.Guest.edx;
1472}
1473
1474
1475VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1476{
1477 return pVCpu->cpum.s.Guest.esi;
1478}
1479
1480
1481VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1482{
1483 return pVCpu->cpum.s.Guest.edi;
1484}
1485
1486
1487VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1488{
1489 return pVCpu->cpum.s.Guest.esp;
1490}
1491
1492
1493VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1494{
1495 return pVCpu->cpum.s.Guest.ebp;
1496}
1497
1498
1499VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1500{
1501 return pVCpu->cpum.s.Guest.eflags.u32;
1502}
1503
1504
1505VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1506{
1507 switch (iReg)
1508 {
1509 case DISCREG_CR0:
1510 *pValue = pVCpu->cpum.s.Guest.cr0;
1511 break;
1512
1513 case DISCREG_CR2:
1514 *pValue = pVCpu->cpum.s.Guest.cr2;
1515 break;
1516
1517 case DISCREG_CR3:
1518 *pValue = pVCpu->cpum.s.Guest.cr3;
1519 break;
1520
1521 case DISCREG_CR4:
1522 *pValue = pVCpu->cpum.s.Guest.cr4;
1523 break;
1524
1525 case DISCREG_CR8:
1526 {
1527 uint8_t u8Tpr;
1528 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /*pfPending*/);
1529 if (RT_FAILURE(rc))
1530 {
1531 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1532 *pValue = 0;
1533 return rc;
1534 }
 1535 *pValue = u8Tpr >> 4; /* bits 7-4 contain the task priority that goes into CR8; bits 3-0 are dropped by the shift. */
1536 break;
1537 }
1538
1539 default:
1540 return VERR_INVALID_PARAMETER;
1541 }
1542 return VINF_SUCCESS;
1543}
1544
1545
1546VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1547{
1548 return pVCpu->cpum.s.Guest.dr[0];
1549}
1550
1551
1552VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1553{
1554 return pVCpu->cpum.s.Guest.dr[1];
1555}
1556
1557
1558VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1559{
1560 return pVCpu->cpum.s.Guest.dr[2];
1561}
1562
1563
1564VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1565{
1566 return pVCpu->cpum.s.Guest.dr[3];
1567}
1568
1569
1570VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1571{
1572 return pVCpu->cpum.s.Guest.dr[6];
1573}
1574
1575
1576VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1577{
1578 return pVCpu->cpum.s.Guest.dr[7];
1579}
1580
1581
1582VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1583{
1584 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1585 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1586 if (iReg == 4 || iReg == 5)
1587 iReg += 2;
1588 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1589 return VINF_SUCCESS;
1590}
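
/*
 * Editor's note: illustrative sketch, not part of the original file.  It just
 * demonstrates the DR4/DR5 aliasing handled by the getter above: register
 * indexes 4 and 5 are remapped to DR6 and DR7.  Hypothetical helper, compiled
 * out with #if 0.
 */
#if 0
static void cpumExampleDrAlias(PVMCPU pVCpu)
{
    uint64_t uDr5 = 0;
    uint64_t uDr7 = 0;
    CPUMGetGuestDRx(pVCpu, 5, &uDr5);           /* same as ...      */
    CPUMGetGuestDRx(pVCpu, DISDREG_DR7, &uDr7); /* ... reading DR7  */
    Assert(uDr5 == uDr7);
}
#endif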
1591
1592
1593VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1594{
1595 return pVCpu->cpum.s.Guest.msrEFER;
1596}
1597
1598
1599/**
1600 * Gets a CPUID leaf.
1601 *
1602 * @param pVCpu Pointer to the VMCPU.
1603 * @param iLeaf The CPUID leaf to get.
1604 * @param pEax Where to store the EAX value.
1605 * @param pEbx Where to store the EBX value.
1606 * @param pEcx Where to store the ECX value.
1607 * @param pEdx Where to store the EDX value.
1608 */
1609VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1610{
1611 PVM pVM = pVCpu->CTX_SUFF(pVM);
1612
1613 PCCPUMCPUID pCpuId;
1614 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1615 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1616 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1617 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1618 else if ( iLeaf - UINT32_C(0x40000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdHyper)
1619 && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP))
1620 pCpuId = &pVM->cpum.s.aGuestCpuIdHyper[iLeaf - UINT32_C(0x40000000)]; /* Only report if HVP bit set. */
1621 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1622 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1623 else
1624 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1625
1626 uint32_t cCurrentCacheIndex = *pEcx;
1627
1628 *pEax = pCpuId->eax;
1629 *pEbx = pCpuId->ebx;
1630 *pEcx = pCpuId->ecx;
1631 *pEdx = pCpuId->edx;
1632
1633 if ( iLeaf == 1)
1634 {
1635 /* Bits 31-24: Initial APIC ID */
1636 Assert(pVCpu->idCpu <= 255);
1637 *pEbx |= (pVCpu->idCpu << 24);
1638 }
1639
1640 if ( iLeaf == 4
1641 && cCurrentCacheIndex < 3
1642 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1643 {
1644 uint32_t type, level, sharing, linesize,
1645 partitions, associativity, sets, cores;
1646
1647 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1648 partitions = 1;
 1649 /* These assignments only silence compiler warnings; the values
 1650 always get overwritten below. */
1651 sets = associativity = sharing = level = 1;
1652 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1653 switch (cCurrentCacheIndex)
1654 {
1655 case 0:
1656 type = 1;
1657 level = 1;
1658 sharing = 1;
1659 linesize = 64;
1660 associativity = 8;
1661 sets = 64;
1662 break;
1663 case 1:
1664 level = 1;
1665 type = 2;
1666 sharing = 1;
1667 linesize = 64;
1668 associativity = 8;
1669 sets = 64;
1670 break;
1671 default: /* shut up gcc.*/
1672 AssertFailed();
1673 case 2:
1674 level = 2;
1675 type = 3;
1676 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1677 linesize = 64;
1678 associativity = 24;
1679 sets = 4096;
1680 break;
1681 }
1682
1683 *pEax |= ((cores - 1) << 26) |
1684 ((sharing - 1) << 14) |
1685 (level << 5) |
1686 1;
1687 *pEbx = (linesize - 1) |
1688 ((partitions - 1) << 12) |
1689 ((associativity - 1) << 22); /* -1 encoding */
1690 *pEcx = sets - 1;
1691 }
1692
1693 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1694}
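
/*
 * Editor's note: illustrative sketch, not part of the original file.  Because
 * leaf 4 is special-cased above, ECX must carry the cache index on input.
 * The helper name is hypothetical and the block is compiled out with #if 0.
 */
#if 0
static uint32_t cpumExampleGetL1DataCacheLineSize(PVMCPU pVCpu)
{
    uint32_t uEax, uEbx, uEdx;
    uint32_t uEcx = 0; /* cache index 0 = L1 data cache in the synthetic Intel topology above */
    CPUMGetGuestCpuId(pVCpu, 4, &uEax, &uEbx, &uEcx, &uEdx);
    return (uEbx & 0xfff) + 1; /* EBX bits 11:0 hold the line size minus one */
}
#endif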
1695
1696/**
 1697 * Gets the number of standard CPUID leafs.
1698 *
1699 * @returns Number of leafs.
1700 * @param pVM Pointer to the VM.
1701 * @remark Intended for PATM.
1702 */
1703VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1704{
1705 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1706}
1707
1708
1709/**
 1710 * Gets the number of extended CPUID leafs.
1711 *
1712 * @returns Number of leafs.
1713 * @param pVM Pointer to the VM.
1714 * @remark Intended for PATM.
1715 */
1716VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1717{
1718 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1719}
1720
1721
1722/**
 1723 * Gets the number of Centaur CPUID leafs.
1724 *
1725 * @returns Number of leafs.
1726 * @param pVM Pointer to the VM.
1727 * @remark Intended for PATM.
1728 */
1729VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1730{
1731 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1732}
1733
1734
1735/**
1736 * Sets a CPUID feature bit.
1737 *
1738 * @param pVM Pointer to the VM.
1739 * @param enmFeature The feature to set.
1740 */
1741VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1742{
1743 switch (enmFeature)
1744 {
1745 /*
1746 * Set the APIC bit in both feature masks.
1747 */
1748 case CPUMCPUIDFEATURE_APIC:
1749 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1750 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
1751 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1752 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1753 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1754 LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
1755 break;
1756
1757 /*
1758 * Set the x2APIC bit in the standard feature mask.
1759 */
1760 case CPUMCPUIDFEATURE_X2APIC:
1761 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1762 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
1763 LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
1764 break;
1765
1766 /*
1767 * Set the sysenter/sysexit bit in the standard feature mask.
1768 * Assumes the caller knows what it's doing! (host must support these)
1769 */
1770 case CPUMCPUIDFEATURE_SEP:
1771 {
1772 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
1773 {
1774 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1775 return;
1776 }
1777
1778 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1779 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
1780 LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
1781 break;
1782 }
1783
1784 /*
1785 * Set the syscall/sysret bit in the extended feature mask.
1786 * Assumes the caller knows what it's doing! (host must support these)
1787 */
1788 case CPUMCPUIDFEATURE_SYSCALL:
1789 {
1790 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1791 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
1792 {
1793#if HC_ARCH_BITS == 32
 1794 /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL is apparently not reported in 32-bit
 1795 * mode, even when the CPU is capable of it in 64-bit mode.
 1796 */
1797 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1798 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
1799 || !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
1800#endif
1801 {
1802 LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
1803 return;
1804 }
1805 }
1806 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
1807 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
1808 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
1809 break;
1810 }
1811
1812 /*
1813 * Set the PAE bit in both feature masks.
1814 * Assumes the caller knows what it's doing! (host must support these)
1815 */
1816 case CPUMCPUIDFEATURE_PAE:
1817 {
1818 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
1819 {
1820 LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
1821 return;
1822 }
1823
1824 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1825 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
1826 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1827 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1828 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1829 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
1830 break;
1831 }
1832
1833 /*
1834 * Set the LONG MODE bit in the extended feature mask.
1835 * Assumes the caller knows what it's doing! (host must support these)
1836 */
1837 case CPUMCPUIDFEATURE_LONG_MODE:
1838 {
1839 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1840 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
1841 {
1842 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
1843 return;
1844 }
1845
1846 /* Valid for both Intel and AMD. */
1847 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1848 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
1849 break;
1850 }
1851
1852 /*
1853 * Set the NX/XD bit in the extended feature mask.
1854 * Assumes the caller knows what it's doing! (host must support these)
1855 */
1856 case CPUMCPUIDFEATURE_NX:
1857 {
1858 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1859 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
1860 {
1861 LogRel(("WARNING: Can't turn on NX/XD when the host doesn't support it!!\n"));
1862 return;
1863 }
1864
1865 /* Valid for both Intel and AMD. */
1866 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX;
1867 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NX\n"));
1868 break;
1869 }
1870
1871 /*
1872 * Set the LAHF/SAHF support in 64-bit mode.
1873 * Assumes the caller knows what it's doing! (host must support this)
1874 */
1875 case CPUMCPUIDFEATURE_LAHF:
1876 {
1877 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1878 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
1879 {
1880 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
1881 return;
1882 }
1883
1884 /* Valid for both Intel and AMD. */
1885 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1886 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1887 break;
1888 }
1889
1890 case CPUMCPUIDFEATURE_PAT:
1891 {
1892 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1893 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
1894 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1895 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
1896 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1897 LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAT\n"));
1898 break;
1899 }
1900
1901 /*
1902 * Set the RDTSCP support bit.
1903 * Assumes the caller knows what it's doing! (host must support this)
1904 */
1905 case CPUMCPUIDFEATURE_RDTSCP:
1906 {
1907 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
1908 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
1909 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1910 {
1911 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1912 LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
1913 return;
1914 }
1915
1916 /* Valid for both Intel and AMD. */
1917 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1918 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1919 break;
1920 }
1921
1922 /*
1923 * Set the Hypervisor Present bit in the standard feature mask.
1924 */
1925 case CPUMCPUIDFEATURE_HVP:
1926 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1927 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP;
1928 LogRel(("CPUMSetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
1929 break;
1930
1931 default:
1932 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1933 break;
1934 }
1935 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1936 {
1937 PVMCPU pVCpu = &pVM->aCpus[i];
1938 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1939 }
1940}
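
/*
 * Editor's note: illustrative sketch, not part of the original file.  VM setup
 * code enables CPUID bits through this API rather than poking the leaf arrays
 * directly, so CPUM_CHANGED_CPUID gets set on every VCPU.  Hypothetical
 * helper, compiled out with #if 0.
 */
#if 0
static void cpumExampleExposeFeatures(PVM pVM)
{
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP); /* tell the guest a hypervisor is present */
    if (!CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE); /* host support is verified inside */
}
#endif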
1941
1942
1943/**
1944 * Queries a CPUID feature bit.
1945 *
1946 * @returns boolean for feature presence
1947 * @param pVM Pointer to the VM.
1948 * @param enmFeature The feature to query.
1949 */
1950VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1951{
1952 switch (enmFeature)
1953 {
1954 case CPUMCPUIDFEATURE_PAE:
1955 {
1956 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
1957 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
1958 break;
1959 }
1960
1961 case CPUMCPUIDFEATURE_NX:
1962 {
1963 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
 1964 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX);
 break;
 1965 }
1966
1967 case CPUMCPUIDFEATURE_RDTSCP:
1968 {
1969 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1970 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
1971 break;
1972 }
1973
1974 case CPUMCPUIDFEATURE_LONG_MODE:
1975 {
1976 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1977 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
1978 break;
1979 }
1980
1981 default:
1982 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1983 break;
1984 }
1985 return false;
1986}
1987
1988
1989/**
1990 * Clears a CPUID feature bit.
1991 *
1992 * @param pVM Pointer to the VM.
1993 * @param enmFeature The feature to clear.
1994 */
1995VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1996{
1997 switch (enmFeature)
1998 {
1999 /*
2000 * Set the APIC bit in both feature masks.
2001 */
2002 case CPUMCPUIDFEATURE_APIC:
2003 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2004 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
2005 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2006 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2007 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
2008 Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n"));
2009 break;
2010
2011 /*
2012 * Clear the x2APIC bit in the standard feature mask.
2013 */
2014 case CPUMCPUIDFEATURE_X2APIC:
2015 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2016 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
2017 Log(("CPUMClearGuestCpuIdFeature: Disabled x2APIC\n"));
2018 break;
2019
2020 case CPUMCPUIDFEATURE_PAE:
2021 {
2022 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2023 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
2024 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2025 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2026 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
2027 Log(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
2028 break;
2029 }
2030
2031 case CPUMCPUIDFEATURE_PAT:
2032 {
2033 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2034 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
2035 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2036 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2037 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
2038 Log(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
2039 break;
2040 }
2041
2042 case CPUMCPUIDFEATURE_LONG_MODE:
2043 {
2044 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2045 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
2046 break;
2047 }
2048
2049 case CPUMCPUIDFEATURE_LAHF:
2050 {
2051 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2052 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
2053 break;
2054 }
2055
2056 case CPUMCPUIDFEATURE_RDTSCP:
2057 {
2058 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2059 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
2060 Log(("CPUMClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
2061 break;
2062 }
2063
2064 case CPUMCPUIDFEATURE_HVP:
2065 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2066 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP;
2067 break;
2068
2069 default:
2070 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2071 break;
2072 }
2073 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2074 {
2075 PVMCPU pVCpu = &pVM->aCpus[i];
2076 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
2077 }
2078}
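
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * ring-3 configuration path could use the three CPUID feature helpers above.
 * Only CPUMSetGuestCpuIdFeature, CPUMGetGuestCpuIdFeature and
 * CPUMClearGuestCpuIdFeature are real; the wrapper below is made up.
 */
#if 0 /* example only */
static void cpumExampleConfigureNx(PVM pVM, bool fExposeNx)
{
    if (fExposeNx)
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);   /* sets NX in leaf 0x80000001 EDX */
    else
        CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); /* hides it again */

    /* Both calls flag CPUM_CHANGED_CPUID on every VCPU, so cached CPUID info gets refreshed. */
    LogRel(("Guest NX is now %s\n",
            CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX) ? "exposed" : "hidden"));
}
#endif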
2079
2080
2081/**
2082 * Gets the host CPU vendor.
2083 *
2084 * @returns CPU vendor.
2085 * @param pVM Pointer to the VM.
2086 */
2087VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
2088{
2089 return pVM->cpum.s.enmHostCpuVendor;
2090}
2091
2092
2093/**
2094 * Gets the guest CPU vendor.
2095 *
2096 * @returns CPU vendor.
2097 * @param pVM Pointer to the VM.
2098 */
2099VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
2100{
2101 return pVM->cpum.s.enmGuestCpuVendor;
2102}
2103
2104
2105VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
2106{
2107 pVCpu->cpum.s.Guest.dr[0] = uDr0;
2108 return CPUMRecalcHyperDRx(pVCpu);
2109}
2110
2111
2112VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
2113{
2114 pVCpu->cpum.s.Guest.dr[1] = uDr1;
2115 return CPUMRecalcHyperDRx(pVCpu);
2116}
2117
2118
2119VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
2120{
2121 pVCpu->cpum.s.Guest.dr[2] = uDr2;
2122 return CPUMRecalcHyperDRx(pVCpu);
2123}
2124
2125
2126VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
2127{
2128 pVCpu->cpum.s.Guest.dr[3] = uDr3;
2129 return CPUMRecalcHyperDRx(pVCpu);
2130}
2131
2132
2133VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
2134{
2135 pVCpu->cpum.s.Guest.dr[6] = uDr6;
2136 return CPUMRecalcHyperDRx(pVCpu);
2137}
2138
2139
2140VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
2141{
2142 pVCpu->cpum.s.Guest.dr[7] = uDr7;
2143 return CPUMRecalcHyperDRx(pVCpu);
2144}
2145
2146
2147VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
2148{
2149 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
2150 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
2151 if (iReg == 4 || iReg == 5)
2152 iReg += 2;
2153 pVCpu->cpum.s.Guest.dr[iReg] = Value;
2154 return CPUMRecalcHyperDRx(pVCpu);
2155}
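
/*
 * Illustrative sketch, not part of the original file: the DR4/DR5 aliasing in
 * CPUMSetGuestDRx above means a write to "DR5" lands in dr[7] and is visible
 * through the DR7 getter.  The caller and the chosen value are hypothetical,
 * and the read-back assumes the getter returns the raw register value.
 */
#if 0 /* example only */
static void cpumExampleDrAliasing(PVMCPU pVCpu)
{
    uint64_t const uValue = X86_DR7_GE | X86_DR7_LE;
    CPUMSetGuestDRx(pVCpu, 5 /* alias of DR7 */, uValue);
    Assert(CPUMGetGuestDR7(pVCpu) == uValue);
}
#endif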
2156
2157
2158/**
2159 * Recalculates the hypervisor DRx register values based on
2160 * current guest registers and DBGF breakpoints.
2161 *
2162 * This is called whenever a guest DRx register is modified and when DBGF
2163 * sets a hardware breakpoint. In guest context this function will reload
2164 * any (hyper) DRx registers that come out with a different value.
2165 *
2166 * @returns VINF_SUCCESS.
2167 * @param pVCpu Pointer to the VMCPU.
2168 */
2169VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
2170{
2171 PVM pVM = pVCpu->CTX_SUFF(pVM);
2172
2173 /*
2174 * Compare the DR7s first.
2175 *
2176 * We only care about the enabled flags. The GE and LE flags are always
2177 * set and we don't care if the guest doesn't set them. GD is virtualized
2178 * when we dispatch #DB, so we never enable it.
2179 */
2180 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
2181#ifdef CPUM_VIRTUALIZE_DRX
2182 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
2183#else
2184 const RTGCUINTREG uGstDr7 = 0;
2185#endif
2186 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
2187 {
2188 /*
2189 * Ok, something is enabled. Recalc each of the breakpoints.
2190 * Straightforward code, not optimized/minimized in any way.
2191 */
2192 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
2193
2194 /* bp 0 */
2195 RTGCUINTREG uNewDr0;
2196 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
2197 {
2198 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2199 uNewDr0 = DBGFBpGetDR0(pVM);
2200 }
2201 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
2202 {
2203 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2204 uNewDr0 = CPUMGetGuestDR0(pVCpu);
2205 }
2206 else
2207 uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
2208
2209 /* bp 1 */
2210 RTGCUINTREG uNewDr1;
2211 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
2212 {
2213 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2214 uNewDr1 = DBGFBpGetDR1(pVM);
2215 }
2216 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
2217 {
2218 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2219 uNewDr1 = CPUMGetGuestDR1(pVCpu);
2220 }
2221 else
2222 uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
2223
2224 /* bp 2 */
2225 RTGCUINTREG uNewDr2;
2226 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
2227 {
2228 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2229 uNewDr2 = DBGFBpGetDR2(pVM);
2230 }
2231 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
2232 {
2233 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2234 uNewDr2 = CPUMGetGuestDR2(pVCpu);
2235 }
2236 else
2237 uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
2238
2239 /* bp 3 */
2240 RTGCUINTREG uNewDr3;
2241 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2242 {
2243 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2244 uNewDr3 = DBGFBpGetDR3(pVM);
2245 }
2246 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2247 {
2248 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2249 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2250 }
2251 else
2252 uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
2253
2254 /*
2255 * Apply the updates.
2256 */
2257#ifdef IN_RC
2258 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
2259 {
2260 /** @todo save host DBx registers. */
2261 }
2262#endif
2263 /** @todo Should this not be setting CPUM_USE_DEBUG_REGS_HYPER?
2264 * (CPUM_VIRTUALIZE_DRX is never defined). */
2265 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
2266 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2267 CPUMSetHyperDR3(pVCpu, uNewDr3);
2268 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2269 CPUMSetHyperDR2(pVCpu, uNewDr2);
2270 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2271 CPUMSetHyperDR1(pVCpu, uNewDr1);
2272 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2273 CPUMSetHyperDR0(pVCpu, uNewDr0);
2274 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2275 CPUMSetHyperDR7(pVCpu, uNewDr7);
2276 }
2277 else
2278 {
2279#ifdef IN_RC
2280 if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
2281 {
2282 /** @todo restore host DBx registers. */
2283 }
2284#endif
2285 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2286 }
2287 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2288 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2289 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2290 pVCpu->cpum.s.Hyper.dr[7]));
2291
2292 return VINF_SUCCESS;
2293}
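
/*
 * Illustrative sketch, not part of the original file: the per-breakpoint
 * policy that CPUMRecalcHyperDRx applies four times above, written out once.
 * DBGF-owned breakpoints take precedence over guest-owned ones; if neither
 * side uses the slot, the existing hyper value is kept.  The uDbgfValue,
 * uGstValue and uHyperValue parameters stand in for DBGFBpGetDRn(),
 * CPUMGetGuestDRn() and pVCpu->cpum.s.Hyper.dr[n] respectively.
 */
#if 0 /* example only */
static RTGCUINTREG cpumExampleMergeOneBp(RTGCUINTREG uDbgfDr7, RTGCUINTREG uGstDr7,
                                         RTGCUINTREG fEnableMask, RTGCUINTREG fCopyMask,
                                         RTGCUINTREG uDbgfValue, RTGCUINTREG uGstValue,
                                         RTGCUINTREG uHyperValue, RTGCUINTREG *puNewDr7)
{
    if (uDbgfDr7 & fEnableMask)
    {
        *puNewDr7 |= uDbgfDr7 & fCopyMask;  /* take DBGF's enable/RW/LEN bits... */
        return uDbgfValue;                  /* ...and DBGF's breakpoint address. */
    }
    if (uGstDr7 & fEnableMask)
    {
        *puNewDr7 |= uGstDr7 & fCopyMask;   /* otherwise mirror the guest's breakpoint. */
        return uGstValue;
    }
    return uHyperValue;                     /* slot unused: keep the current hyper value. */
}
#endif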
2294
2295
2296/**
2297 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2298 *
2299 * @returns true if NXE is enabled, otherwise false.
2300 * @param pVCpu Pointer to the VMCPU.
2301 */
2302VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2303{
2304 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2305}
2306
2307
2308/**
2309 * Tests if the guest has the Page Size Extension enabled (PSE).
2310 *
2311 * @returns true if big pages are enabled (PSE or PAE set), otherwise false.
2312 * @param pVCpu Pointer to the VMCPU.
2313 */
2314VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2315{
2316 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2317 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2318}
2319
2320
2321/**
2322 * Tests if the guest has paging enabled (PG).
2323 *
2324 * @returns true if paging is enabled, otherwise false.
2325 * @param pVCpu Pointer to the VMCPU.
2326 */
2327VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2328{
2329 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2330}
2331
2332
2333/**
2334 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
2335 *
2336 * @returns true if CR0.WP is set, otherwise false.
2337 * @param pVCpu Pointer to the VMCPU.
2338 */
2339VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2340{
2341 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2342}
2343
2344
2345/**
2346 * Tests if the guest is running in real mode or not.
2347 *
2348 * @returns true if in real mode, otherwise false.
2349 * @param pVCpu Pointer to the VMCPU.
2350 */
2351VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2352{
2353 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2354}
2355
2356
2357/**
2358 * Tests if the guest is running in real or virtual 8086 mode.
2359 *
2360 * @returns @c true if it is, @c false if not.
2361 * @param pVCpu Pointer to the VMCPU.
2362 */
2363VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2364{
2365 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2366 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2367}
2368
2369
2370/**
2371 * Tests if the guest is running in protected mode or not.
2372 *
2373 * @returns true if in protected mode, otherwise false.
2374 * @param pVCpu Pointer to the VMCPU.
2375 */
2376VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2377{
2378 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2379}
2380
2381
2382/**
2383 * Tests if the guest is running in paged protected mode or not.
2384 *
2385 * @returns true if in paged protected mode, otherwise false.
2386 * @param pVCpu Pointer to the VMCPU.
2387 */
2388VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2389{
2390 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2391}
2392
2393
2394/**
2395 * Tests if the guest is running in long mode or not.
2396 *
2397 * @returns true if in long mode, otherwise false.
2398 * @param pVCpu Pointer to the VMCPU.
2399 */
2400VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2401{
2402 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2403}
2404
2405
2406/**
2407 * Tests if the guest is running in PAE mode or not.
2408 *
2409 * @returns true if in PAE mode, otherwise false.
2410 * @param pVCpu Pointer to the VMCPU.
2411 */
2412VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2413{
2414#ifdef VBOX_WITH_OLD_VTX_CODE
2415 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2416 && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
2417 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2418#else
2419 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2420 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2421 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LME);
2422#endif
2423}
2424
2425
2426/**
2427 * Tests if the guest is running in 64-bit mode or not.
2428 *
2429 * @returns true if in 64-bit protected mode, otherwise false.
2430 * @param pVCpu The current virtual CPU.
2431 */
2432VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2433{
2434 if (!CPUMIsGuestInLongMode(pVCpu))
2435 return false;
2436 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2437 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2438}
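
/*
 * Illustrative sketch, not part of the original file: composing the mode
 * predicates above into a single description, e.g. for logging.  The helper
 * name is hypothetical; long mode is tested before the PAE/paged checks.
 */
#if 0 /* example only */
static const char *cpumExampleDescribeGuestMode(PVMCPU pVCpu)
{
    if (CPUMIsGuestInRealMode(pVCpu))
        return "real";
    if (CPUMIsGuestInLongMode(pVCpu))
        return CPUMIsGuestIn64BitCode(pVCpu) ? "long, 64-bit code" : "long, compatibility code";
    if (CPUMIsGuestInPAEMode(pVCpu))
        return "PAE paged protected";
    if (CPUMIsGuestInPagedProtectedMode(pVCpu))
        return "paged protected";
    return "protected, paging disabled";
}
#endif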
2439
2440
2441/**
2442 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2443 * registers.
2444 *
2445 * @returns true if in 64-bit protected mode, otherwise false.
2446 * @param pCtx Pointer to the current guest CPU context.
2447 */
2448VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2449{
2450 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2451}
2452
2453#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2454/**
2455 * Tests whether we have entered raw-mode and ring compression is in effect.
2456 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2457 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2458 * @param pVCpu The current virtual CPU.
2459 */
2460VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2461{
2462 return pVCpu->cpum.s.fRawEntered;
2463}
2464#endif
2465
2466
2467/**
2468 * Updates the EFLAGS while we're in raw-mode.
2469 *
2470 * @param pVCpu Pointer to the VMCPU.
2471 * @param fEfl The new EFLAGS value.
2472 */
2473VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2474{
2475#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2476 if (pVCpu->cpum.s.fRawEntered)
2477 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest), fEfl);
2478 else
2479#endif
2480 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2481}
2482
2483
2484/**
2485 * Gets the EFLAGS while we're in raw-mode.
2486 *
2487 * @returns The eflags.
2488 * @param pVCpu Pointer to the current virtual CPU.
2489 */
2490VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2491{
2492#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2493 if (pVCpu->cpum.s.fRawEntered)
2494 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest));
2495#endif
2496 return pVCpu->cpum.s.Guest.eflags.u32;
2497}
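
/*
 * Illustrative sketch, not part of the original file: while raw-mode is
 * entered the guest's IF is virtualized by the patch manager, so callers
 * should go through CPUMRawGetEFlags rather than reading Guest.eflags
 * directly.  The helper below is hypothetical.
 */
#if 0 /* example only */
static bool cpumExampleGuestInterruptsEnabled(PVMCPU pVCpu)
{
    return RT_BOOL(CPUMRawGetEFlags(pVCpu) & X86_EFL_IF);
}
#endif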
2498
2499
2500/**
2501 * Sets the specified changed flags (CPUM_CHANGED_*).
2502 * @param pVCpu Pointer to the current virtual CPU.
2503 * @param fChangedFlags The CPUM_CHANGED_* flags to set.
2504 */
2505VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2506{
2507 pVCpu->cpum.s.fChanged |= fChangedFlags;
2508}
2509
2510
2511/**
2512 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
2513 * @returns true if supported.
2514 * @returns false if not supported.
2515 * @param pVM Pointer to the VM.
2516 */
2517VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2518{
2519 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2520}
2521
2522
2523/**
2524 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2525 * @returns true if used.
2526 * @returns false if not used.
2527 * @param pVM Pointer to the VM.
2528 */
2529VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2530{
2531 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
2532}
2533
2534
2535/**
2536 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2537 * @returns true if used.
2538 * @returns false if not used.
2539 * @param pVM Pointer to the VM.
2540 */
2541VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2542{
2543 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
2544}
2545
2546#ifndef IN_RING3
2547
2548/**
2549 * Lazily sync in the FPU/XMM state.
2550 *
2551 * @returns VBox status code.
2552 * @param pVCpu Pointer to the VMCPU.
2553 */
2554VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2555{
2556 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2557}
2558
2559#endif /* !IN_RING3 */
2560
2561/**
2562 * Checks if we activated the FPU/XMM state of the guest OS.
2563 * @returns true if we did.
2564 * @returns false if not.
2565 * @param pVCpu Pointer to the VMCPU.
2566 */
2567VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2568{
2569 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2570}
2571
2572
2573/**
2574 * Deactivate the FPU/XMM state of the guest OS.
2575 * @param pVCpu Pointer to the VMCPU.
2576 */
2577VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2578{
2579 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2580}
2581
2582
2583/**
2584 * Checks if the guest debug state is active.
2585 *
2586 * @returns true if active, false if not.
2587 * @param pVCpu Pointer to the VMCPU.
2588 */
2589VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2590{
2591 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2592}
2593
2594/**
2595 * Checks if the hyper debug state is active.
2596 *
2597 * @returns true if active, false if not.
2598 * @param pVCpu Pointer to the VMCPU.
2599 */
2600VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2601{
2602 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
2603}
2604
2605
2606/**
2607 * Mark the guest's debug state as inactive.
2608 * Only the CPUM_USE_DEBUG_REGS flag is cleared here.
2609 *
2610 * @param pVCpu Pointer to the VMCPU.
2611 */
2612VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2613{
2614 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2615}
2616
2617
2618/**
2619 * Mark the hypervisor's debug state as inactive.
2620 * Only the CPUM_USE_DEBUG_REGS_HYPER flag is cleared here.
2621 *
2622 * @param pVCpu Pointer to the VMCPU.
2623 */
2624VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
2625{
2626 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2627}
2628
2629
2630/**
2631 * Get the current privilege level of the guest.
2632 *
2633 * @returns CPL
2634 * @param pVCpu Pointer to the current virtual CPU.
2635 */
2636VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2637{
2638 /*
2639 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2640 *
2641 * Note! We used to check CS.DPL here, assuming it was always equal to
2642 * CPL even if a conforming segment was loaded. But this turned out to
2643 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2644 * during install after a far call to ring 2. Then on newer
2645 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2646 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2647 *
2648 * So, forget CS.DPL, always use SS.DPL.
2649 *
2650 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2651 * isn't necessarily equal if the segment is conforming.
2652 * See section 4.11.1 in the AMD manual.
2653 */
2654 uint32_t uCpl;
2655 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2656 {
2657 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2658 {
2659 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2660 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2661 else
2662 {
2663 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2664#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2665# ifdef VBOX_WITH_RAW_RING1
2666 if (pVCpu->cpum.s.fRawEntered)
2667 {
2668 if ( uCpl == 2
2669 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2670 uCpl = 1;
2671 else if (uCpl == 1)
2672 uCpl = 0;
2673 }
2674 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2675# else
2676 if (uCpl == 1)
2677 uCpl = 0;
2678# endif
2679#endif
2680 }
2681 }
2682 else
2683 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2684 }
2685 else
2686 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2687 return uCpl;
2688}
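
/*
 * Illustrative sketch, not part of the original file: a typical consumer of
 * CPUMGetGuestCPL, e.g. an emulation helper rejecting a privileged operation
 * outside ring 0.  The helper and the status code choice are hypothetical; a
 * real caller would normally inject #GP(0) instead of returning an error.
 */
#if 0 /* example only */
static int cpumExampleCheckRing0(PVMCPU pVCpu)
{
    if (CPUMGetGuestCPL(pVCpu) != 0)
        return VERR_ACCESS_DENIED;
    return VINF_SUCCESS;
}
#endif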
2689
2690
2691/**
2692 * Gets the current guest CPU mode.
2693 *
2694 * If paging mode is what you need, check out PGMGetGuestMode().
2695 *
2696 * @returns The CPU mode.
2697 * @param pVCpu Pointer to the VMCPU.
2698 */
2699VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2700{
2701 CPUMMODE enmMode;
2702 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2703 enmMode = CPUMMODE_REAL;
2704 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2705 enmMode = CPUMMODE_PROTECTED;
2706 else
2707 enmMode = CPUMMODE_LONG;
2708
2709 return enmMode;
2710}
2711
2712
2713/**
2714 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
2715 *
2716 * @returns 16, 32 or 64.
2717 * @param pVCpu The current virtual CPU.
2718 */
2719VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2720{
2721 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2722 return 16;
2723
2724 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2725 {
2726 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2727 return 16;
2728 }
2729
2730 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2731 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2732 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2733 return 64;
2734
2735 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2736 return 32;
2737
2738 return 16;
2739}
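
/*
 * Illustrative sketch, not part of the original file: one way a caller could
 * turn CPUMGetGuestCodeBits into an instruction-pointer mask when walking
 * guest code.  The masks follow directly from the 16/32/64 return values.
 */
#if 0 /* example only */
static uint64_t cpumExampleGetRipMask(PVMCPU pVCpu)
{
    switch (CPUMGetGuestCodeBits(pVCpu))
    {
        case 16: return UINT64_C(0x000000000000ffff);
        case 32: return UINT64_C(0x00000000ffffffff);
        default: return UINT64_C(0xffffffffffffffff);
    }
}
#endif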
2740
2741
2742VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2743{
2744 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2745 return DISCPUMODE_16BIT;
2746
2747 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2748 {
2749 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2750 return DISCPUMODE_16BIT;
2751 }
2752
2753 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2754 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2755 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2756 return DISCPUMODE_64BIT;
2757
2758 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2759 return DISCPUMODE_32BIT;
2760
2761 return DISCPUMODE_16BIT;
2762}
2763