VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@53390

Last change on this file since 53390 was 52770, checked in by vboxsync, 10 years ago

VMM/CPUM: Fix EFER WRMSR to ignore EFER.LMA bit, trunk regression caused by r96058.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 107.8 KB
1/* $Id: CPUMAllRegs.cpp 52770 2014-09-17 11:04:43Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2014 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52
53/*******************************************************************************
54* Defined Constants And Macros *
55*******************************************************************************/
56/**
57 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
58 *
59 * @returns Pointer to the Virtual CPU.
60 * @param a_pGuestCtx Pointer to the guest context.
61 */
62#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
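/*
 * Illustrative sketch (not part of the original file): RT_FROM_MEMBER follows
 * the classic container_of idiom, subtracting the member offset from the
 * member pointer to recover the containing structure.  The Outer structure
 * below is hypothetical and only demonstrates the pattern:
 *
 * @code
 *     #include <stddef.h>   // offsetof
 *     struct Outer { int a; int b; };
 *
 *     struct Outer  Instance;
 *     int          *pMember = &Instance.b;
 *     struct Outer *pOuter  = (struct Outer *)((char *)(void *)pMember - offsetof(struct Outer, b));
 *     // pOuter == &Instance, just as CPUM_GUEST_CTX_TO_VMCPU recovers the VMCPU
 *     // from a pointer to its embedded cpum.s.Guest member.
 * @endcode
 */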
63
64/**
65 * Lazily loads the hidden parts of a selector register when using raw-mode.
66 */
67#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
68# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
69 do \
70 { \
71 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
72 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
73 } while (0)
74#else
75# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
76 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
77#endif
78
79
80
81#ifdef VBOX_WITH_RAW_MODE_NOT_R0
82
83/**
84 * Does the lazy hidden selector register loading.
85 *
86 * @param pVCpu The current Virtual CPU.
87 * @param pSReg The selector register to lazily load hidden parts of.
88 */
89static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
90{
91 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
92 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
93 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
94
95 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
96 {
97 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
98 pSReg->Attr.u = 0;
99 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
100 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
101 pSReg->Attr.n.u2Dpl = 3;
102 pSReg->Attr.n.u1Present = 1;
103 pSReg->u32Limit = 0x0000ffff;
104 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
105 pSReg->ValidSel = pSReg->Sel;
106 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
107 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
108 }
109 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
110 {
111 /* Real mode - leave the limit and flags alone here, at least for now. */
112 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
113 pSReg->ValidSel = pSReg->Sel;
114 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
115 }
116 else
117 {
118 /* Protected mode - get it from the selector descriptor tables. */
119 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
120 {
121 Assert(!CPUMIsGuestInLongMode(pVCpu));
122 pSReg->Sel = 0;
123 pSReg->u64Base = 0;
124 pSReg->u32Limit = 0;
125 pSReg->Attr.u = 0;
126 pSReg->ValidSel = 0;
127 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
128 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
129 }
130 else
131 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
132 }
133}
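/*
 * Worked example (illustrative only): in real and V8086 mode the hidden base
 * is simply the selector shifted left by four bits, so loading ES with 0xB800
 * yields a linear base of 0xB8000:
 *
 * @code
 *     uint16_t const uSel  = 0xB800;
 *     uint32_t const uBase = (uint32_t)uSel << 4;    // 0x000B8000
 * @endcode
 */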
134
135
136/**
137 * Makes sure the hidden CS and SS selector registers are valid, loading them if
138 * necessary.
139 *
140 * @param pVCpu The current virtual CPU.
141 */
142VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
143{
144 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
145 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
146}
147
148
149/**
150 * Loads the hidden parts of a selector register.
151 * @param pVCpu The current virtual CPU.
152 * @param pSReg The selector register to load the hidden parts of.
153 */
154VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
155{
156 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
157}
158
159#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
160
161
162/**
163 * Obsolete.
164 *
165 * We don't support nested hypervisor context interrupts or traps. Life is much
166 * simpler when we don't. It's also slightly faster at times.
167 *
168 * @param pVCpu Pointer to the VMCPU.
169 */
170VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
171{
172 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
173}
174
175
176/**
177 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
178 *
179 * @param pVCpu Pointer to the VMCPU.
180 */
181VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
182{
183 return &pVCpu->cpum.s.Hyper;
184}
185
186
187VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
188{
189 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
190 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
191}
192
193
194VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
195{
196 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
197 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
198}
199
200
201VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
202{
203 pVCpu->cpum.s.Hyper.cr3 = cr3;
204
205#ifdef IN_RC
206 /* Update the current CR3. */
207 ASMSetCR3(cr3);
208#endif
209}
210
211VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
212{
213 return pVCpu->cpum.s.Hyper.cr3;
214}
215
216
217VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
218{
219 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
220}
221
222
223VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
224{
225 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
226}
227
228
229VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
230{
231 pVCpu->cpum.s.Hyper.es.Sel = SelES;
232}
233
234
235VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
236{
237 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
238}
239
240
241VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
242{
243 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
244}
245
246
247VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
248{
249 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
250}
251
252
253VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
254{
255 pVCpu->cpum.s.Hyper.esp = u32ESP;
256}
257
258
259VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
260{
261 pVCpu->cpum.s.Hyper.edx = u32EDX;
262}
263
264
265VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
266{
267 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
268 return VINF_SUCCESS;
269}
270
271
272VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
273{
274 pVCpu->cpum.s.Hyper.eip = u32EIP;
275}
276
277
278/**
279 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
280 * EFLAGS and EIP prior to resuming guest execution.
281 *
282 * All general registers not given as parameters will be set to 0. The EFLAGS
283 * register will be set to sane values for C/C++ code execution with interrupts
284 * disabled and IOPL 0.
285 *
286 * @param pVCpu The current virtual CPU.
287 * @param u32EIP The EIP value.
288 * @param u32ESP The ESP value.
289 * @param u32EAX The EAX value.
290 * @param u32EDX The EDX value.
291 */
292VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
293{
294 pVCpu->cpum.s.Hyper.eip = u32EIP;
295 pVCpu->cpum.s.Hyper.esp = u32ESP;
296 pVCpu->cpum.s.Hyper.eax = u32EAX;
297 pVCpu->cpum.s.Hyper.edx = u32EDX;
298 pVCpu->cpum.s.Hyper.ecx = 0;
299 pVCpu->cpum.s.Hyper.ebx = 0;
300 pVCpu->cpum.s.Hyper.ebp = 0;
301 pVCpu->cpum.s.Hyper.esi = 0;
302 pVCpu->cpum.s.Hyper.edi = 0;
303 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
304}
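/*
 * Hedged usage sketch (not from the original file): a raw-mode resume path
 * would reinitialize the hyper context roughly like this before re-entering
 * the guest; the addresses are illustrative only.
 *
 * @code
 *     uint32_t const uEntryEip = 0x00001000;    // illustrative entry point
 *     uint32_t const uStackTop = 0x00002000;    // illustrative hyper stack top
 *     CPUMSetHyperState(pVCpu, uEntryEip, uStackTop, 0, 0);   // eax=0, edx=0
 * @endcode
 */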
305
306
307VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
308{
309 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
310}
311
312
313VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
314{
315 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
316}
317
318
319/** @def MAYBE_LOAD_DRx
320 * Macro for updating DRx values in raw-mode and ring-0 contexts.
321 */
322#ifdef IN_RING0
323# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
324# ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
325# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
326 do { \
327 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
328 a_fnLoad(a_uValue); \
329 else \
330 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
331 } while (0)
332# else
333# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
334 do { \
335 /** @todo we're not loading the correct guest value here! */ \
336 a_fnLoad(a_uValue); \
337 } while (0)
338# endif
339# else
340# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
341 do { \
342 a_fnLoad(a_uValue); \
343 } while (0)
344# endif
345
346#elif defined(IN_RC)
347# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
348 do { \
349 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
350 { a_fnLoad(a_uValue); } \
351 } while (0)
352
353#else
354# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
355#endif
356
357VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
358{
359 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
360 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
361}
362
363
364VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
365{
366 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
367 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
368}
369
370
371VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
372{
373 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
374 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
375}
376
377
378VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
379{
380 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
381 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
382}
383
384
385VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
386{
387 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
388}
389
390
391VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
392{
393 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
394#ifdef IN_RC
395 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
396#endif
397}
398
399
400VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
401{
402 return pVCpu->cpum.s.Hyper.cs.Sel;
403}
404
405
406VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
407{
408 return pVCpu->cpum.s.Hyper.ds.Sel;
409}
410
411
412VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
413{
414 return pVCpu->cpum.s.Hyper.es.Sel;
415}
416
417
418VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
419{
420 return pVCpu->cpum.s.Hyper.fs.Sel;
421}
422
423
424VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
425{
426 return pVCpu->cpum.s.Hyper.gs.Sel;
427}
428
429
430VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
431{
432 return pVCpu->cpum.s.Hyper.ss.Sel;
433}
434
435
436VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
437{
438 return pVCpu->cpum.s.Hyper.eax;
439}
440
441
442VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
443{
444 return pVCpu->cpum.s.Hyper.ebx;
445}
446
447
448VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
449{
450 return pVCpu->cpum.s.Hyper.ecx;
451}
452
453
454VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
455{
456 return pVCpu->cpum.s.Hyper.edx;
457}
458
459
460VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
461{
462 return pVCpu->cpum.s.Hyper.esi;
463}
464
465
466VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
467{
468 return pVCpu->cpum.s.Hyper.edi;
469}
470
471
472VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
473{
474 return pVCpu->cpum.s.Hyper.ebp;
475}
476
477
478VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
479{
480 return pVCpu->cpum.s.Hyper.esp;
481}
482
483
484VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
485{
486 return pVCpu->cpum.s.Hyper.eflags.u32;
487}
488
489
490VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
491{
492 return pVCpu->cpum.s.Hyper.eip;
493}
494
495
496VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
497{
498 return pVCpu->cpum.s.Hyper.rip;
499}
500
501
502VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
503{
504 if (pcbLimit)
505 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
506 return pVCpu->cpum.s.Hyper.idtr.pIdt;
507}
508
509
510VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
511{
512 if (pcbLimit)
513 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
514 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
515}
516
517
518VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
519{
520 return pVCpu->cpum.s.Hyper.ldtr.Sel;
521}
522
523
524VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
525{
526 return pVCpu->cpum.s.Hyper.dr[0];
527}
528
529
530VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
531{
532 return pVCpu->cpum.s.Hyper.dr[1];
533}
534
535
536VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
537{
538 return pVCpu->cpum.s.Hyper.dr[2];
539}
540
541
542VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
543{
544 return pVCpu->cpum.s.Hyper.dr[3];
545}
546
547
548VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
549{
550 return pVCpu->cpum.s.Hyper.dr[6];
551}
552
553
554VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
555{
556 return pVCpu->cpum.s.Hyper.dr[7];
557}
558
559
560/**
561 * Gets the pointer to the internal CPUMCTXCORE structure.
562 * This is only for reading in order to save a few calls.
563 *
564 * @param pVCpu Handle to the virtual cpu.
565 */
566VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
567{
568 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
569}
570
571
572/**
573 * Queries the pointer to the internal CPUMCTX structure.
574 *
575 * @returns The CPUMCTX pointer.
576 * @param pVCpu Handle to the virtual cpu.
577 */
578VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
579{
580 return &pVCpu->cpum.s.Guest;
581}
582
583VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
584{
585#ifdef VBOX_WITH_IEM
586# ifdef VBOX_WITH_RAW_MODE_NOT_R0
587 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
588 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
589# endif
590#endif
591 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
592 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
593 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
594 return VINF_SUCCESS; /* formality, consider it void. */
595}
596
597VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
598{
599#ifdef VBOX_WITH_IEM
600# ifdef VBOX_WITH_RAW_MODE_NOT_R0
601 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
602 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
603# endif
604#endif
605 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
606 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
607 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
608 return VINF_SUCCESS; /* formality, consider it void. */
609}
610
611VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
612{
613#ifdef VBOX_WITH_IEM
614# ifdef VBOX_WITH_RAW_MODE_NOT_R0
615 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
616 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
617# endif
618#endif
619 pVCpu->cpum.s.Guest.tr.Sel = tr;
620 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
621 return VINF_SUCCESS; /* formality, consider it void. */
622}
623
624VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
625{
626#ifdef VBOX_WITH_IEM
627# ifdef VBOX_WITH_RAW_MODE_NOT_R0
628 if ( ( ldtr != 0
629 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
630 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
631 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
632# endif
633#endif
634 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
635 /* The caller will set more hidden bits if it has them. */
636 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
637 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
638 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
639 return VINF_SUCCESS; /* formality, consider it void. */
640}
641
642
643/**
644 * Set the guest CR0.
645 *
646 * When called in GC, the hyper CR0 may be updated if that is
647 * required. The caller only has to take special action if AM,
648 * WP, PG or PE changes.
649 *
650 * @returns VINF_SUCCESS (consider it void).
651 * @param pVCpu Handle to the virtual cpu.
652 * @param cr0 The new CR0 value.
653 */
654VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
655{
656#ifdef IN_RC
657 /*
658 * Check if we need to change hypervisor CR0 because
659 * of math stuff.
660 */
661 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
662 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
663 {
664 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
665 {
666 /*
667 * We haven't saved the host FPU state yet, so TS and MP are both set
668 * and EM should be reflecting the guest EM (it always does this).
669 */
670 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
671 {
672 uint32_t HyperCR0 = ASMGetCR0();
673 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
674 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
675 HyperCR0 &= ~X86_CR0_EM;
676 HyperCR0 |= cr0 & X86_CR0_EM;
677 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
678 ASMSetCR0(HyperCR0);
679 }
680# ifdef VBOX_STRICT
681 else
682 {
683 uint32_t HyperCR0 = ASMGetCR0();
684 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
685 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
686 }
687# endif
688 }
689 else
690 {
691 /*
692 * Already saved the state, so we're just mirroring
693 * the guest flags.
694 */
695 uint32_t HyperCR0 = ASMGetCR0();
696 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
697 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
698 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
699 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
700 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
701 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
702 ASMSetCR0(HyperCR0);
703 }
704 }
705#endif /* IN_RC */
706
707 /*
708 * Check for changes causing TLB flushes (for REM).
709 * The caller is responsible for calling PGM when appropriate.
710 */
711 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
712 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
713 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
714 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
715
716 /*
717 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
718 */
719 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
720 PGMCr0WpEnabled(pVCpu);
721
722 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
723 return VINF_SUCCESS;
724}
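/*
 * Illustrative caller-side sketch (not part of the original file): detecting
 * which of the bits that require special handling actually changed across a
 * CR0 write; pVCpu and uNewCr0 are assumed to be in scope.
 *
 * @code
 *     uint64_t const fOldCr0  = CPUMGetGuestCR0(pVCpu);
 *     uint64_t const fSpecial = X86_CR0_AM | X86_CR0_WP | X86_CR0_PG | X86_CR0_PE;
 *     CPUMSetGuestCR0(pVCpu, uNewCr0);
 *     if ((fOldCr0 ^ uNewCr0) & fSpecial)
 *     {
 *         // caller-specific follow-up (e.g. telling PGM about paging changes) goes here
 *     }
 * @endcode
 */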
725
726
727VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
728{
729 pVCpu->cpum.s.Guest.cr2 = cr2;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
735{
736 pVCpu->cpum.s.Guest.cr3 = cr3;
737 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
738 return VINF_SUCCESS;
739}
740
741
742VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
743{
744 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
745 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
746 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
747 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
748 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
749 cr4 &= ~X86_CR4_OSFSXR;
750 pVCpu->cpum.s.Guest.cr4 = cr4;
751 return VINF_SUCCESS;
752}
753
754
755VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
756{
757 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
758 return VINF_SUCCESS;
759}
760
761
762VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
763{
764 pVCpu->cpum.s.Guest.eip = eip;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
770{
771 pVCpu->cpum.s.Guest.eax = eax;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
777{
778 pVCpu->cpum.s.Guest.ebx = ebx;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
784{
785 pVCpu->cpum.s.Guest.ecx = ecx;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
791{
792 pVCpu->cpum.s.Guest.edx = edx;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
798{
799 pVCpu->cpum.s.Guest.esp = esp;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
805{
806 pVCpu->cpum.s.Guest.ebp = ebp;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
812{
813 pVCpu->cpum.s.Guest.esi = esi;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
819{
820 pVCpu->cpum.s.Guest.edi = edi;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
826{
827 pVCpu->cpum.s.Guest.ss.Sel = ss;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
833{
834 pVCpu->cpum.s.Guest.cs.Sel = cs;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
840{
841 pVCpu->cpum.s.Guest.ds.Sel = ds;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
847{
848 pVCpu->cpum.s.Guest.es.Sel = es;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
854{
855 pVCpu->cpum.s.Guest.fs.Sel = fs;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
861{
862 pVCpu->cpum.s.Guest.gs.Sel = gs;
863 return VINF_SUCCESS;
864}
865
866
867VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
868{
869 pVCpu->cpum.s.Guest.msrEFER = val;
870}
871
872#ifndef VBOX_WITH_NEW_MSR_CODE
873
874/**
875 * Worker for CPUMQueryGuestMsr().
876 *
877 * @retval VINF_SUCCESS
878 * @retval VERR_CPUM_RAISE_GP_0
879 * @param pVCpu The cross context CPU structure.
880 * @param idMsr The MSR to read.
881 * @param puValue Where to store the return value.
882 */
883static int cpumQueryGuestMsrInt(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
884{
885 /*
886 * If we don't indicate MSR support in the CPUID feature bits, indicate
887 * that a #GP(0) should be raised.
888 */
889 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
890 {
891 *puValue = 0;
892 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
893 }
894
895 int rc = VINF_SUCCESS;
896 uint8_t const u8Multiplier = 4;
897 switch (idMsr)
898 {
899 case MSR_IA32_TSC:
900 *puValue = TMCpuTickGet(pVCpu);
901 break;
902
903 case MSR_IA32_APICBASE:
904 {
905 /* See @bugref{7097} comment #6. */
906 PVM pVM = pVCpu->CTX_SUFF(pVM);
907 if (PDMHasApic(pVM))
908 *puValue = pVCpu->cpum.s.Guest.msrApicBase;
909 else
910 {
911 rc = VERR_CPUM_RAISE_GP_0;
912 *puValue = 0;
913 }
914 break;
915 }
916
917 case MSR_IA32_CR_PAT:
918 *puValue = pVCpu->cpum.s.Guest.msrPAT;
919 break;
920
921 case MSR_IA32_SYSENTER_CS:
922 *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
923 break;
924
925 case MSR_IA32_SYSENTER_EIP:
926 *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
927 break;
928
929 case MSR_IA32_SYSENTER_ESP:
930 *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
931 break;
932
933 case MSR_IA32_MTRR_CAP:
934 {
935 /* This is currently a bit weird. :-) */
936 uint8_t const cVariableRangeRegs = 0;
937 bool const fSystemManagementRangeRegisters = false;
938 bool const fFixedRangeRegisters = false;
939 bool const fWriteCombiningType = false;
940 *puValue = cVariableRangeRegs
941 | (fFixedRangeRegisters ? RT_BIT_64(8) : 0)
942 | (fWriteCombiningType ? RT_BIT_64(10) : 0)
943 | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
944 break;
945 }
946
947 case IA32_MTRR_PHYSBASE0: case IA32_MTRR_PHYSMASK0:
948 case IA32_MTRR_PHYSBASE1: case IA32_MTRR_PHYSMASK1:
949 case IA32_MTRR_PHYSBASE2: case IA32_MTRR_PHYSMASK2:
950 case IA32_MTRR_PHYSBASE3: case IA32_MTRR_PHYSMASK3:
951 case IA32_MTRR_PHYSBASE4: case IA32_MTRR_PHYSMASK4:
952 case IA32_MTRR_PHYSBASE5: case IA32_MTRR_PHYSMASK5:
953 case IA32_MTRR_PHYSBASE6: case IA32_MTRR_PHYSMASK6:
954 case IA32_MTRR_PHYSBASE7: case IA32_MTRR_PHYSMASK7:
955 /** @todo implement variable MTRRs. */
956 *puValue = 0;
957 break;
958#if 0 /** @todo newer CPUs have more, figure since when and do selective GP(). */
959 case IA32_MTRR_PHYSBASE8: case IA32_MTRR_PHYSMASK8:
960 case IA32_MTRR_PHYSBASE9: case IA32_MTRR_PHYSMASK9:
961 *puValue = 0;
962 break;
963#endif
964
965 case MSR_IA32_MTRR_DEF_TYPE:
966 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
967 break;
968
969 case IA32_MTRR_FIX64K_00000:
970 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000;
971 break;
972 case IA32_MTRR_FIX16K_80000:
973 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000;
974 break;
975 case IA32_MTRR_FIX16K_A0000:
976 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000;
977 break;
978 case IA32_MTRR_FIX4K_C0000:
979 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000;
980 break;
981 case IA32_MTRR_FIX4K_C8000:
982 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000;
983 break;
984 case IA32_MTRR_FIX4K_D0000:
985 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000;
986 break;
987 case IA32_MTRR_FIX4K_D8000:
988 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000;
989 break;
990 case IA32_MTRR_FIX4K_E0000:
991 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000;
992 break;
993 case IA32_MTRR_FIX4K_E8000:
994 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000;
995 break;
996 case IA32_MTRR_FIX4K_F0000:
997 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000;
998 break;
999 case IA32_MTRR_FIX4K_F8000:
1000 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000;
1001 break;
1002
1003 case MSR_K6_EFER:
1004 *puValue = pVCpu->cpum.s.Guest.msrEFER;
1005 break;
1006
1007 case MSR_K8_SF_MASK:
1008 *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
1009 break;
1010
1011 case MSR_K6_STAR:
1012 *puValue = pVCpu->cpum.s.Guest.msrSTAR;
1013 break;
1014
1015 case MSR_K8_LSTAR:
1016 *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
1017 break;
1018
1019 case MSR_K8_CSTAR:
1020 *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
1021 break;
1022
1023 case MSR_K8_FS_BASE:
1024 *puValue = pVCpu->cpum.s.Guest.fs.u64Base;
1025 break;
1026
1027 case MSR_K8_GS_BASE:
1028 *puValue = pVCpu->cpum.s.Guest.gs.u64Base;
1029 break;
1030
1031 case MSR_K8_KERNEL_GS_BASE:
1032 *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
1033 break;
1034
1035 case MSR_K8_TSC_AUX:
1036 *puValue = pVCpu->cpum.s.GuestMsrs.msr.TscAux;
1037 break;
1038
1039 case MSR_IA32_PERF_STATUS:
1040 /** @todo This may not be exactly correct; maybe use the host's values.
1041 * Apple code indicates that we should use CPU Hz / 1.333MHz here. */
1042 /** @todo Where are the specs implemented here found? */
1043 *puValue = UINT64_C(1000) /* TSC increment by tick */
1044 | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
1045 | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
1046 break;
1047
1048 case MSR_IA32_FSB_CLOCK_STS:
1049 /*
1050 * FSB frequency encoded as:
1051 * 0 - 266 MHz
1052 * 1 - 133 MHz
1053 * 2 - 200 MHz
1054 * 3 - 166 MHz
1055 * 5 - 100 MHz
1056 */
1057 *puValue = (2 << 4);
1058 break;
1059
1060 case MSR_IA32_PLATFORM_INFO:
1061 *puValue = ((uint32_t)u8Multiplier << 8) /* Flex ratio max */
1062 | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
1063 break;
1064
1065 case MSR_IA32_THERM_STATUS:
1066 /* CPU temperature relative to TCC, to actually activate, CPUID leaf 6 EAX[0] must be set */
1067 *puValue = RT_BIT(31) /* validity bit */
1068 | (UINT64_C(20) << 16) /* degrees till TCC */;
1069 break;
1070
1071 case MSR_IA32_MISC_ENABLE:
1072#if 0
1073 /* Needs to be tested more before enabling. */
1074 *puValue = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
1075#else
1076 /* Currently we don't allow guests to modify these enable MSRs. */
1077 *puValue = MSR_IA32_MISC_ENABLE_FAST_STRINGS /* by default */;
1078
1079 if ((pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR) != 0)
1080
1081 *puValue |= MSR_IA32_MISC_ENABLE_MONITOR /* if mwait/monitor available */;
1082 /** @todo: add more cpuid-controlled features this way. */
1083#endif
1084 break;
1085
1086 /** @todo virtualize DEBUGCTL and relatives */
1087 case MSR_IA32_DEBUGCTL:
1088 *puValue = 0;
1089 break;
1090
1091#if 0 /*def IN_RING0 */
1092 case MSR_IA32_PLATFORM_ID:
1093 case MSR_IA32_BIOS_SIGN_ID:
1094 if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
1095 {
1096 /* Available since the P6 family. VT-x implies that this feature is present. */
1097 if (idMsr == MSR_IA32_PLATFORM_ID)
1098 *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
1099 else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
1100 *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
1101 break;
1102 }
1103 /* no break */
1104#endif
1105 /*
1106 * The BIOS_SIGN_ID MSR and MSR_IA32_MCP_CAP et al exist on AMD64 as
1107 * well; at least Bulldozer has them. Windows 7 queries them.
1108 * XP has been observed querying MSR_IA32_MC0_CTL.
1109 * XP64 has been observed querying MSR_P4_LASTBRANCH_0 (also on AMD).
1110 */
1111 case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */
1112 case MSR_IA32_MCG_CAP: /* fam/mod >= 6_01 */
1113 case MSR_IA32_MCG_STATUS: /* indicated as not present in CAP */
1114 /*case MSR_IA32_MCG_CTRL: - indicated as not present in CAP */
1115 case MSR_IA32_MC0_CTL:
1116 case MSR_IA32_MC0_STATUS:
1117 case MSR_P4_LASTBRANCH_0:
1118 case MSR_P4_LASTBRANCH_1:
1119 case MSR_P4_LASTBRANCH_2:
1120 case MSR_P4_LASTBRANCH_3:
1121 *puValue = 0;
1122 break;
1123
1124
1125 /*
1126 * Intel-specific MSRs:
1127 */
1128 case MSR_P5_MC_ADDR:
1129 case MSR_P5_MC_TYPE:
1130 case MSR_P4_LASTBRANCH_TOS: /** @todo Are these branch regs still here on more recent CPUs? The documentation doesn't mention them for several archs. */
1131 case MSR_IA32_PERFEVTSEL0: /* NetWare 6.5 wants these four. (Bet on AMD as well.) */
1132 case MSR_IA32_PERFEVTSEL1:
1133 case MSR_IA32_PMC0:
1134 case MSR_IA32_PMC1:
1135 case MSR_IA32_PLATFORM_ID: /* fam/mod >= 6_01 */
1136 case MSR_IA32_MPERF: /* intel_pstate depends on this but does a validation test */
1137 case MSR_IA32_APERF: /* intel_pstate depends on this but does a validation test */
1138 /*case MSR_IA32_BIOS_UPDT_TRIG: - write-only? */
1139 case MSR_RAPL_POWER_UNIT:
1140 case MSR_BBL_CR_CTL3: /* ca. core arch? */
1141 case MSR_PKG_CST_CONFIG_CONTROL: /* Nehalem, Sandy Bridge */
1142 case MSR_CORE_THREAD_COUNT: /* Apple queries this. */
1143 case MSR_FLEX_RATIO: /* Apple queries this. */
1144 *puValue = 0;
1145 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
1146 {
1147 Log(("CPUM: MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1148 rc = VERR_CPUM_RAISE_GP_0;
1149 break;
1150 }
1151
1152 /* Provide more plausible values for some of them. */
1153 switch (idMsr)
1154 {
1155 case MSR_RAPL_POWER_UNIT:
1156 *puValue = RT_MAKE_U32_FROM_U8(3 /* power units (1/8 W)*/,
1157 16 /* 15.3 micro-Joules */,
1158 10 /* 976 microseconds increments */,
1159 0);
1160 break;
1161 case MSR_BBL_CR_CTL3:
1162 *puValue = RT_MAKE_U32_FROM_U8(1, /* bit 0 - L2 Hardware Enabled. (RO) */
1163 1, /* bit 8 - L2 Enabled (R/W). */
1164 0, /* bit 23 - L2 Not Present (RO). */
1165 0);
1166 break;
1167 case MSR_PKG_CST_CONFIG_CONTROL:
1168 *puValue = pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl;
1169 break;
1170 case MSR_CORE_THREAD_COUNT:
1171 {
1172 /** @todo restrict this to nehalem. */
1173 PVM pVM = pVCpu->CTX_SUFF(pVM); /* Note! Not sweating the 4-bit core count limit on westmere. */
1174 *puValue = (pVM->cCpus & 0xffff) | ((pVM->cCpus & 0xffff) << 16);
1175 break;
1176 }
1177
1178 case MSR_FLEX_RATIO:
1179 {
1180 /** @todo Check for P4, it's different there. Try find accurate specs. */
1181 *puValue = (uint32_t)u8Multiplier << 8;
1182 break;
1183 }
1184 }
1185 break;
1186
1187#if 0 /* Only on pentium CPUs! */
1188 /* Event counters, not supported. */
1189 case MSR_IA32_CESR:
1190 case MSR_IA32_CTR0:
1191 case MSR_IA32_CTR1:
1192 *puValue = 0;
1193 break;
1194#endif
1195
1196
1197 /*
1198 * AMD specific MSRs:
1199 */
1200 case MSR_K8_SYSCFG:
1201 case MSR_K8_INT_PENDING:
1202 case MSR_K8_NB_CFG: /* (All known values are 0 on reset.) */
1203 case MSR_K8_HWCR: /* Very interesting bits here. :) */
1204 case MSR_K8_VM_CR: /* Windows 8 */
1205 case 0xc0011029: /* quick fix for FreeBSD 9.1. */
1206 case 0xc0010042: /* quick fix for something. */
1207 case 0xc001102a: /* quick fix for w2k8 + opposition. */
1208 case 0xc0011004: /* quick fix for the opposition. */
1209 case 0xc0011005: /* quick fix for the opposition. */
1210 case MSR_K7_EVNTSEL0: /* quick fix for the opposition. */
1211 case MSR_K7_EVNTSEL1: /* quick fix for the opposition. */
1212 case MSR_K7_EVNTSEL2: /* quick fix for the opposition. */
1213 case MSR_K7_EVNTSEL3: /* quick fix for the opposition. */
1214 case MSR_K7_PERFCTR0: /* quick fix for the opposition. */
1215 case MSR_K7_PERFCTR1: /* quick fix for the opposition. */
1216 case MSR_K7_PERFCTR2: /* quick fix for the opposition. */
1217 case MSR_K7_PERFCTR3: /* quick fix for the opposition. */
1218 *puValue = 0;
1219 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_AMD)
1220 {
1221 Log(("CPUM: MSR %#x is AMD, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1222 return VERR_CPUM_RAISE_GP_0;
1223 }
1224 /* ignored */
1225 break;
1226
1227 default:
1228 /*
1229 * Hand the X2APIC range to PDM and the APIC.
1230 */
1231 if ( idMsr >= MSR_IA32_X2APIC_START
1232 && idMsr <= MSR_IA32_X2APIC_END)
1233 {
1234 rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
1235 if (RT_SUCCESS(rc))
1236 rc = VINF_SUCCESS;
1237 else
1238 {
1239 *puValue = 0;
1240 rc = VERR_CPUM_RAISE_GP_0;
1241 }
1242 }
1243 else
1244 {
1245 *puValue = 0;
1246 rc = VERR_CPUM_RAISE_GP_0;
1247 }
1248 break;
1249 }
1250
1251 return rc;
1252}
1253
1254
1255/**
1256 * Query an MSR.
1257 *
1258 * The caller is responsible for checking privilege if the call is the result
1259 * of a RDMSR instruction. We'll do the rest.
1260 *
1261 * @retval VINF_SUCCESS on success.
1262 * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
1263 * expected to take the appropriate actions. @a *puValue is set to 0.
1264 * @param pVCpu Pointer to the VMCPU.
1265 * @param idMsr The MSR.
1266 * @param puValue Where to return the value.
1267 *
1268 * @remarks This will always return the right values, even when we're in the
1269 * recompiler.
1270 */
1271VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
1272{
1273 int rc = cpumQueryGuestMsrInt(pVCpu, idMsr, puValue);
1274 LogFlow(("CPUMQueryGuestMsr: %#x -> %llx rc=%d\n", idMsr, *puValue, rc));
1275 return rc;
1276}
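/*
 * Hedged caller sketch (illustrative only): how a RDMSR emulation path is
 * expected to consume the VERR_CPUM_RAISE_GP_0 status; pVCpu and idMsr are
 * assumed to be in scope.
 *
 * @code
 *     uint64_t uValue    = 0;
 *     uint32_t uGuestEax = 0;
 *     uint32_t uGuestEdx = 0;
 *     bool     fRaiseGp0 = false;
 *     int rc = CPUMQueryGuestMsr(pVCpu, idMsr, &uValue);
 *     if (RT_SUCCESS(rc))
 *     {
 *         uGuestEax = (uint32_t)uValue;           // low half  -> EAX
 *         uGuestEdx = (uint32_t)(uValue >> 32);   // high half -> EDX
 *     }
 *     else
 *         fRaiseGp0 = true;                       // caller injects #GP(0) into the guest
 * @endcode
 */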
1277
1278
1279/**
1280 * Sets the MSR.
1281 *
1282 * The caller is responsible for checking privilege if the call is the result
1283 * of a WRMSR instruction. We'll do the rest.
1284 *
1285 * @retval VINF_SUCCESS on success.
1286 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
1287 * appropriate actions.
1288 *
1289 * @param pVCpu Pointer to the VMCPU.
1290 * @param idMsr The MSR id.
1291 * @param uValue The value to set.
1292 *
1293 * @remarks Everyone changing MSR values, including the recompiler, shall do it
1294 * by calling this method. This makes sure we have current values and
1295 * that we trigger all the right actions when something changes.
1296 */
1297VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
1298{
1299 LogFlow(("CPUMSetGuestMsr: %#x <- %#llx\n", idMsr, uValue));
1300
1301 /*
1302 * If we don't indicate MSR support in the CPUID feature bits, indicate
1303 * that a #GP(0) should be raised.
1304 */
1305 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
1306 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
1307
1308 int rc = VINF_SUCCESS;
1309 switch (idMsr)
1310 {
1311 case MSR_IA32_MISC_ENABLE:
1312 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = uValue;
1313 break;
1314
1315 case MSR_IA32_TSC:
1316 TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
1317 break;
1318
1319 case MSR_IA32_APICBASE:
1320 rc = PDMApicSetBase(pVCpu, uValue);
1321 if (rc != VINF_SUCCESS)
1322 rc = VERR_CPUM_RAISE_GP_0;
1323 break;
1324
1325 case MSR_IA32_CR_PAT:
1326 pVCpu->cpum.s.Guest.msrPAT = uValue;
1327 break;
1328
1329 case MSR_IA32_SYSENTER_CS:
1330 pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16 bits selector */
1331 break;
1332
1333 case MSR_IA32_SYSENTER_EIP:
1334 pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
1335 break;
1336
1337 case MSR_IA32_SYSENTER_ESP:
1338 pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
1339 break;
1340
1341 case MSR_IA32_MTRR_CAP:
1342 return VERR_CPUM_RAISE_GP_0;
1343
1344 case MSR_IA32_MTRR_DEF_TYPE:
1345 if ( (uValue & UINT64_C(0xfffffffffffff300))
1346 || ( (uValue & 0xff) != 0
1347 && (uValue & 0xff) != 1
1348 && (uValue & 0xff) != 4
1349 && (uValue & 0xff) != 5
1350 && (uValue & 0xff) != 6) )
1351 {
1352 Log(("CPUM: MSR_IA32_MTRR_DEF_TYPE: #GP(0) - writing reserved value (%#llx)\n", uValue));
1353 return VERR_CPUM_RAISE_GP_0;
1354 }
1355 pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue;
1356 break;
1357
1358 case IA32_MTRR_PHYSBASE0: case IA32_MTRR_PHYSMASK0:
1359 case IA32_MTRR_PHYSBASE1: case IA32_MTRR_PHYSMASK1:
1360 case IA32_MTRR_PHYSBASE2: case IA32_MTRR_PHYSMASK2:
1361 case IA32_MTRR_PHYSBASE3: case IA32_MTRR_PHYSMASK3:
1362 case IA32_MTRR_PHYSBASE4: case IA32_MTRR_PHYSMASK4:
1363 case IA32_MTRR_PHYSBASE5: case IA32_MTRR_PHYSMASK5:
1364 case IA32_MTRR_PHYSBASE6: case IA32_MTRR_PHYSMASK6:
1365 case IA32_MTRR_PHYSBASE7: case IA32_MTRR_PHYSMASK7:
1366 /** @todo implement variable MTRRs. */
1367 break;
1368#if 0 /** @todo newer CPUs have more, figure since when and do selective GP(). */
1369 case IA32_MTRR_PHYSBASE8: case IA32_MTRR_PHYSMASK8:
1370 case IA32_MTRR_PHYSBASE9: case IA32_MTRR_PHYSMASK9:
1371 break;
1372#endif
1373
1374 case IA32_MTRR_FIX64K_00000:
1375 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000 = uValue;
1376 break;
1377 case IA32_MTRR_FIX16K_80000:
1378 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000 = uValue;
1379 break;
1380 case IA32_MTRR_FIX16K_A0000:
1381 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000 = uValue;
1382 break;
1383 case IA32_MTRR_FIX4K_C0000:
1384 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000 = uValue;
1385 break;
1386 case IA32_MTRR_FIX4K_C8000:
1387 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000 = uValue;
1388 break;
1389 case IA32_MTRR_FIX4K_D0000:
1390 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000 = uValue;
1391 break;
1392 case IA32_MTRR_FIX4K_D8000:
1393 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000 = uValue;
1394 break;
1395 case IA32_MTRR_FIX4K_E0000:
1396 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000 = uValue;
1397 break;
1398 case IA32_MTRR_FIX4K_E8000:
1399 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000 = uValue;
1400 break;
1401 case IA32_MTRR_FIX4K_F0000:
1402 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000 = uValue;
1403 break;
1404 case IA32_MTRR_FIX4K_F8000:
1405 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000 = uValue;
1406 break;
1407
1408 /*
1409 * AMD64 MSRs.
1410 */
1411 case MSR_K6_EFER:
1412 {
1413 PVM pVM = pVCpu->CTX_SUFF(pVM);
1414 uint64_t const uOldEFER = pVCpu->cpum.s.Guest.msrEFER;
1415 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1416 ? pVM->cpum.s.aGuestCpuIdExt[1].edx
1417 : 0;
1418 uint64_t fMask = 0;
1419 uint64_t fIgnoreMask = MSR_K6_EFER_LMA;
1420
1421 /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
1422 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)
1423 fMask |= MSR_K6_EFER_NXE;
1424 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
1425 fMask |= MSR_K6_EFER_LME;
1426 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
1427 fMask |= MSR_K6_EFER_SCE;
1428 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
1429 fMask |= MSR_K6_EFER_FFXSR;
1430
1431 /* #GP(0) If anything outside the allowed bits is set. */
1432 if (uValue & ~(fIgnoreMask | fMask))
1433 {
1434 Log(("CPUM: Settings disallowed EFER bit. uValue=%#RX64 fAllowed=%#RX64 -> #GP(0)\n", uValue, fMask));
1435 return VERR_CPUM_RAISE_GP_0;
1436 }
1437
1438 /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
1439 paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
1440 if ( (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
1441 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
1442 {
1443 Log(("CPUM: Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
1444 return VERR_CPUM_RAISE_GP_0;
1445 }
1446
1447 /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
1448 AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
1449 ("Unexpected value %RX64\n", uValue));
1450 pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);
1451
1452 /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
1453 if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
1454 if ( (uOldEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
1455 != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
1456 {
1457 /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
1458 HMFlushTLB(pVCpu);
1459
1460 /* Notify PGM about NXE changes. */
1461 if ( (uOldEFER & MSR_K6_EFER_NXE)
1462 != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
1463 PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE));
1464 }
1465 break;
1466 }
1467
1468 case MSR_K8_SF_MASK:
1469 pVCpu->cpum.s.Guest.msrSFMASK = uValue;
1470 break;
1471
1472 case MSR_K6_STAR:
1473 pVCpu->cpum.s.Guest.msrSTAR = uValue;
1474 break;
1475
1476 case MSR_K8_LSTAR:
1477 pVCpu->cpum.s.Guest.msrLSTAR = uValue;
1478 break;
1479
1480 case MSR_K8_CSTAR:
1481 pVCpu->cpum.s.Guest.msrCSTAR = uValue;
1482 break;
1483
1484 case MSR_K8_FS_BASE:
1485 pVCpu->cpum.s.Guest.fs.u64Base = uValue;
1486 break;
1487
1488 case MSR_K8_GS_BASE:
1489 pVCpu->cpum.s.Guest.gs.u64Base = uValue;
1490 break;
1491
1492 case MSR_K8_KERNEL_GS_BASE:
1493 pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
1494 break;
1495
1496 case MSR_K8_TSC_AUX:
1497 pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
1498 break;
1499
1500 case MSR_IA32_DEBUGCTL:
1501 /** @todo virtualize DEBUGCTL and relatives */
1502 break;
1503
1504 /*
1505 * Intel-specific MSRs:
1506 */
1507 /*case MSR_IA32_PLATFORM_ID: - read-only */
1508 case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */
1509 case MSR_IA32_BIOS_UPDT_TRIG: /* fam/mod >= 6_01 */
1510 /*case MSR_IA32_MCP_CAP: - read-only */
1511 /*case MSR_IA32_MCG_STATUS: - read-only */
1512 /*case MSR_IA32_MCG_CTRL: - indicated as not present in CAP */
1513 /*case MSR_IA32_MC0_CTL: - read-only? */
1514 /*case MSR_IA32_MC0_STATUS: - read-only? */
1515 case MSR_PKG_CST_CONFIG_CONTROL:
1516 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
1517 {
1518 Log(("CPUM: MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1519 return VERR_CPUM_RAISE_GP_0;
1520 }
1521
1522 switch (idMsr)
1523 {
1524 case MSR_PKG_CST_CONFIG_CONTROL:
1525 {
1526 if (pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl & RT_BIT_64(15))
1527 {
1528 Log(("MSR_PKG_CST_CONFIG_CONTROL: Write protected -> #GP\n"));
1529 return VERR_CPUM_RAISE_GP_0;
1530 }
1531 static uint64_t s_fMask = UINT64_C(0x01f08407); /** @todo Only Nehalem has 24; Only Sandy has 27 and 28. */
1532 static uint64_t s_fGpInvalid = UINT64_C(0xffffffff00ff0000); /** @todo figure out exactly what's off limits. */
1533 if ((uValue & s_fGpInvalid) || (uValue & 7) >= 5)
1534 {
1535 Log(("MSR_PKG_CST_CONFIG_CONTROL: Invalid value %#llx -> #GP\n", uValue));
1536 return VERR_CPUM_RAISE_GP_0;
1537 }
1538 pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = uValue & s_fMask;
1539 break;
1540 }
1541
1542 }
1543 /* ignored */
1544 break;
1545
1546 /*
1547 * AMD specific MSRs:
1548 */
1549 case MSR_K8_SYSCFG: /** @todo can be written, but we ignore that for now. */
1550 case MSR_K8_INT_PENDING: /** @todo can be written, but we ignore that for now. */
1551 case MSR_K8_NB_CFG: /** @todo can be written; the apicid swapping might be used and would need saving, but probably unnecessary. */
1552 case 0xc0011029: /* quick fix for FreeBSD 9.1. */
1553 case 0xc0010042: /* quick fix for something. */
1554 case 0xc001102a: /* quick fix for w2k8 + opposition. */
1555 case 0xc0011004: /* quick fix for the opposition. */
1556 case 0xc0011005: /* quick fix for the opposition. */
1557 case MSR_K7_EVNTSEL0: /* quick fix for the opposition. */
1558 case MSR_K7_EVNTSEL1: /* quick fix for the opposition. */
1559 case MSR_K7_EVNTSEL2: /* quick fix for the opposition. */
1560 case MSR_K7_EVNTSEL3: /* quick fix for the opposition. */
1561 case MSR_K7_PERFCTR0: /* quick fix for the opposition. */
1562 case MSR_K7_PERFCTR1: /* quick fix for the opposition. */
1563 case MSR_K7_PERFCTR2: /* quick fix for the opposition. */
1564 case MSR_K7_PERFCTR3: /* quick fix for the opposition. */
1565 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_AMD)
1566 {
1567 Log(("CPUM: MSR %#x is AMD, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1568 return VERR_CPUM_RAISE_GP_0;
1569 }
1570 /* ignored */
1571 break;
1572
1573
1574 default:
1575 /*
1576 * Hand the X2APIC range to PDM and the APIC.
1577 */
1578 if ( idMsr >= MSR_IA32_X2APIC_START
1579 && idMsr <= MSR_IA32_X2APIC_END)
1580 {
1581 rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
1582 if (rc != VINF_SUCCESS)
1583 rc = VERR_CPUM_RAISE_GP_0;
1584 }
1585 else
1586 {
1587 /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
1588 /** @todo rc = VERR_CPUM_RAISE_GP_0 */
1589 Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
1590 }
1591 break;
1592 }
1593 return rc;
1594}
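/*
 * Worked example (illustrative, based on the MSR_K6_EFER case above): a guest
 * WRMSR of EFER with LMA set is accepted because LMA sits in the ignore mask,
 * while the stored value only takes bits from the writable mask.  This assumes
 * NX, long mode and SYSCALL are exposed in CPUID, so fMask = NXE | LME | SCE.
 *
 * @code
 *     uint64_t const fMask    = MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_SCE;
 *     uint64_t const uOldEFER = MSR_K6_EFER_LME | MSR_K6_EFER_LMA | MSR_K6_EFER_SCE;
 *     uint64_t const uWritten = MSR_K6_EFER_LME | MSR_K6_EFER_LMA | MSR_K6_EFER_SCE; // LMA ignored, no #GP
 *     uint64_t const uNewEFER = (uOldEFER & ~fMask) | (uWritten & fMask);
 *     // uNewEFER keeps LMA from uOldEFER and takes LME/SCE (and NXE) from the write.
 * @endcode
 */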
1595
1596#endif /* !VBOX_WITH_NEW_MSR_CODE */
1597
1598
1599VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
1600{
1601 if (pcbLimit)
1602 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
1603 return pVCpu->cpum.s.Guest.idtr.pIdt;
1604}
1605
1606
1607VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
1608{
1609 if (pHidden)
1610 *pHidden = pVCpu->cpum.s.Guest.tr;
1611 return pVCpu->cpum.s.Guest.tr.Sel;
1612}
1613
1614
1615VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
1616{
1617 return pVCpu->cpum.s.Guest.cs.Sel;
1618}
1619
1620
1621VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
1622{
1623 return pVCpu->cpum.s.Guest.ds.Sel;
1624}
1625
1626
1627VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
1628{
1629 return pVCpu->cpum.s.Guest.es.Sel;
1630}
1631
1632
1633VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
1634{
1635 return pVCpu->cpum.s.Guest.fs.Sel;
1636}
1637
1638
1639VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
1640{
1641 return pVCpu->cpum.s.Guest.gs.Sel;
1642}
1643
1644
1645VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
1646{
1647 return pVCpu->cpum.s.Guest.ss.Sel;
1648}
1649
1650
1651VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
1652{
1653 return pVCpu->cpum.s.Guest.ldtr.Sel;
1654}
1655
1656
1657VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
1658{
1659 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
1660 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
1661 return pVCpu->cpum.s.Guest.ldtr.Sel;
1662}
1663
1664
1665VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
1666{
1667 return pVCpu->cpum.s.Guest.cr0;
1668}
1669
1670
1671VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
1672{
1673 return pVCpu->cpum.s.Guest.cr2;
1674}
1675
1676
1677VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
1678{
1679 return pVCpu->cpum.s.Guest.cr3;
1680}
1681
1682
1683VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
1684{
1685 return pVCpu->cpum.s.Guest.cr4;
1686}
1687
1688
1689VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
1690{
1691 uint64_t u64;
1692 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1693 if (RT_FAILURE(rc))
1694 u64 = 0;
1695 return u64;
1696}
1697
1698
1699VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
1700{
1701 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1702}
1703
1704
1705VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
1706{
1707 return pVCpu->cpum.s.Guest.eip;
1708}
1709
1710
1711VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1712{
1713 return pVCpu->cpum.s.Guest.rip;
1714}
1715
1716
1717VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1718{
1719 return pVCpu->cpum.s.Guest.eax;
1720}
1721
1722
1723VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1724{
1725 return pVCpu->cpum.s.Guest.ebx;
1726}
1727
1728
1729VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1730{
1731 return pVCpu->cpum.s.Guest.ecx;
1732}
1733
1734
1735VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1736{
1737 return pVCpu->cpum.s.Guest.edx;
1738}
1739
1740
1741VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1742{
1743 return pVCpu->cpum.s.Guest.esi;
1744}
1745
1746
1747VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1748{
1749 return pVCpu->cpum.s.Guest.edi;
1750}
1751
1752
1753VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1754{
1755 return pVCpu->cpum.s.Guest.esp;
1756}
1757
1758
1759VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1760{
1761 return pVCpu->cpum.s.Guest.ebp;
1762}
1763
1764
1765VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1766{
1767 return pVCpu->cpum.s.Guest.eflags.u32;
1768}
1769
1770
1771VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1772{
1773 switch (iReg)
1774 {
1775 case DISCREG_CR0:
1776 *pValue = pVCpu->cpum.s.Guest.cr0;
1777 break;
1778
1779 case DISCREG_CR2:
1780 *pValue = pVCpu->cpum.s.Guest.cr2;
1781 break;
1782
1783 case DISCREG_CR3:
1784 *pValue = pVCpu->cpum.s.Guest.cr3;
1785 break;
1786
1787 case DISCREG_CR4:
1788 *pValue = pVCpu->cpum.s.Guest.cr4;
1789 break;
1790
1791 case DISCREG_CR8:
1792 {
1793 uint8_t u8Tpr;
1794 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1795 if (RT_FAILURE(rc))
1796 {
1797 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1798 *pValue = 0;
1799 return rc;
1800 }
1801 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes into CR8; bits 3-0 are not used. */
1802 break;
1803 }
1804
1805 default:
1806 return VERR_INVALID_PARAMETER;
1807 }
1808 return VINF_SUCCESS;
1809}
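/*
 * Worked example (illustrative): the APIC TPR keeps the task-priority class in
 * bits 7-4, so a TPR of 0x50 reads back as CR8 = 5 through the code above.
 *
 * @code
 *     uint8_t  const u8Tpr = 0x50;
 *     uint64_t const uCr8  = u8Tpr >> 4;   // 5
 * @endcode
 */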
1810
1811
1812VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1813{
1814 return pVCpu->cpum.s.Guest.dr[0];
1815}
1816
1817
1818VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1819{
1820 return pVCpu->cpum.s.Guest.dr[1];
1821}
1822
1823
1824VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1825{
1826 return pVCpu->cpum.s.Guest.dr[2];
1827}
1828
1829
1830VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1831{
1832 return pVCpu->cpum.s.Guest.dr[3];
1833}
1834
1835
1836VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1837{
1838 return pVCpu->cpum.s.Guest.dr[6];
1839}
1840
1841
1842VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1843{
1844 return pVCpu->cpum.s.Guest.dr[7];
1845}
1846
1847
1848VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1849{
1850 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1851 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1852 if (iReg == 4 || iReg == 5)
1853 iReg += 2;
1854 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1855 return VINF_SUCCESS;
1856}
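/*
 * Illustrative note (not part of the original file): DR4 and DR5 alias DR6 and
 * DR7 when CR4.DE is clear, which is why the lookup above bumps the index by two.
 *
 * @code
 *     uint64_t uValue = 0;
 *     CPUMGetGuestDRx(pVCpu, 5, &uValue);   // returns the guest DR7 value
 * @endcode
 */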
1857
1858
1859VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1860{
1861 return pVCpu->cpum.s.Guest.msrEFER;
1862}
1863
1864
1865/**
1866 * Looks up a CPUID leaf in the CPUID leaf array.
1867 *
1868 * @returns Pointer to the leaf if found, NULL if not.
1869 *
1870 * @param pVM Pointer to the cross context VM structure.
1871 * @param uLeaf The leaf to get.
1872 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1873 * isn't.
1874 */
1875PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf)
1876{
1877 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1878 if (iEnd)
1879 {
1880 unsigned iStart = 0;
1881 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1882 for (;;)
1883 {
1884 unsigned i = iStart + (iEnd - iStart) / 2U;
1885 if (uLeaf < paLeaves[i].uLeaf)
1886 {
1887 if (i <= iStart)
1888 return NULL;
1889 iEnd = i;
1890 }
1891 else if (uLeaf > paLeaves[i].uLeaf)
1892 {
1893 i += 1;
1894 if (i >= iEnd)
1895 return NULL;
1896 iStart = i;
1897 }
1898 else
1899 {
1900 uSubLeaf &= paLeaves[i].fSubLeafMask;
1901 if (uSubLeaf != paLeaves[i].uSubLeaf)
1902 {
1903 /* Find the right subleaf. We return the last one before
1904 uSubLeaf if we don't find an exact match. */
1905 if (uSubLeaf < paLeaves[i].uSubLeaf)
1906 while ( i > 0
1907 && uLeaf == paLeaves[i].uLeaf
1908 && uSubLeaf < paLeaves[i].uSubLeaf)
1909 i--;
1910 else
1911 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1912 && uLeaf == paLeaves[i + 1].uLeaf
1913 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1914 i++;
1915 }
1916 return &paLeaves[i];
1917 }
1918 }
1919 }
1920
1921 return NULL;
1922}
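/*
 * Hedged usage sketch (illustrative only): looking up the deterministic cache
 * parameters leaf (0x00000004), subleaf 1; a NULL return means the leaf isn't
 * exposed to the guest.
 *
 * @code
 *     PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000004), 1);   // leaf 4, subleaf 1
 *     if (pLeaf)
 *         LogRel(("CPUID[4,1]: eax=%#x ebx=%#x ecx=%#x edx=%#x\n",
 *                 pLeaf->uEax, pLeaf->uEbx, pLeaf->uEcx, pLeaf->uEdx));
 * @endcode
 */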
1923
1924
1925/**
1926 * Gets a CPUID leaf.
1927 *
1928 * @param pVCpu Pointer to the VMCPU.
1929 * @param iLeaf The CPUID leaf to get.
1930 * @param pEax Where to store the EAX value.
1931 * @param pEbx Where to store the EBX value.
1932 * @param pEcx Where to store the ECX value.
1933 * @param pEdx Where to store the EDX value.
1934 */
1935VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1936{
1937 PVM pVM = pVCpu->CTX_SUFF(pVM);
1938
1939 PCCPUMCPUID pCpuId;
1940 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1941 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1942 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1943 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1944 else if ( iLeaf - UINT32_C(0x40000000) < 0x100 /** @todo Fix this later: Hyper-V says 0x400000FF is the last valid leaf. */
1945 && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP)) /* Only report if HVP bit set. */
1946 {
1947 PCPUMCPUIDLEAF pHyperLeaf = cpumCpuIdGetLeaf(pVM, iLeaf, 0 /* uSubLeaf */);
1948 if (RT_LIKELY(pHyperLeaf))
1949 {
1950 *pEax = pHyperLeaf->uEax;
1951 *pEbx = pHyperLeaf->uEbx;
1952 *pEcx = pHyperLeaf->uEcx;
1953 *pEdx = pHyperLeaf->uEdx;
1954 }
1955 else
1956 {
1957 *pEax = *pEbx = *pEcx = *pEdx = 0;
1958 }
1959 return;
1960 }
1961 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1962 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1963 else
1964 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1965
1966 uint32_t cCurrentCacheIndex = *pEcx;
1967
1968 *pEax = pCpuId->eax;
1969 *pEbx = pCpuId->ebx;
1970 *pEcx = pCpuId->ecx;
1971 *pEdx = pCpuId->edx;
1972
1973 if ( iLeaf == 1)
1974 {
1975 /* Bits 31-24: Initial APIC ID */
1976 Assert(pVCpu->idCpu <= 255);
1977 *pEbx |= (pVCpu->idCpu << 24);
1978 }
1979
1980 if ( iLeaf == 4
1981 && cCurrentCacheIndex < 3
1982 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1983 {
1984 uint32_t type, level, sharing, linesize,
1985 partitions, associativity, sets, cores;
1986
1987 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1988 partitions = 1;
1989 /* These are only to shut up the compiler, as they will always
1990 get overwritten; the compiler should be able to figure that out. */
1991 sets = associativity = sharing = level = 1;
1992 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1993 switch (cCurrentCacheIndex)
1994 {
1995 case 0:
1996 type = 1;
1997 level = 1;
1998 sharing = 1;
1999 linesize = 64;
2000 associativity = 8;
2001 sets = 64;
2002 break;
2003 case 1:
2004 level = 1;
2005 type = 2;
2006 sharing = 1;
2007 linesize = 64;
2008 associativity = 8;
2009 sets = 64;
2010 break;
2011 default: /* shut up gcc.*/
2012 AssertFailed();
2013 case 2:
2014 level = 2;
2015 type = 3;
2016 sharing = cores; /* our L2 cache is modelled as shared between all cores */
2017 linesize = 64;
2018 associativity = 24;
2019 sets = 4096;
2020 break;
2021 }
2022
2023 NOREF(type);
2024 *pEax |= ((cores - 1) << 26) |
2025 ((sharing - 1) << 14) |
2026 (level << 5) |
2027 1;
2028 *pEbx = (linesize - 1) |
2029 ((partitions - 1) << 12) |
2030 ((associativity - 1) << 22); /* -1 encoding */
2031 *pEcx = sets - 1;
2032 }
2033
2034 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
2035}
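
/*
 * Illustrative sketch (editor's addition): decoding the "-1 encoded" leaf 4
 * values synthesised above back into cache parameters, for an Intel guest
 * profile.  Note that ECX is effectively an in/out parameter here: the value
 * passed in selects the cache index before it is overwritten.  pVCpu is
 * assumed to be a valid virtual CPU pointer.
 *
 * @code
 *      uint32_t uEax, uEbx, uEdx;
 *      uint32_t uEcx = 1;                                      // cache index 1 (the L1 i-cache above)
 *      CPUMGetGuestCpuId(pVCpu, 4, &uEax, &uEbx, &uEcx, &uEdx);
 *
 *      uint32_t const cLineSize      = (uEbx & 0xfff) + 1;            // bits 11:0
 *      uint32_t const cPartitions    = ((uEbx >> 12) & 0x3ff) + 1;    // bits 21:12
 *      uint32_t const cAssociativity = ((uEbx >> 22) & 0x3ff) + 1;    // bits 31:22
 *      uint32_t const cSets          = uEcx + 1;
 *      uint32_t const cbCache        = cLineSize * cPartitions * cAssociativity * cSets;
 * @endcode
 *
 * With the values used above for index 1 this works out to 64 * 1 * 8 * 64,
 * i.e. a 32 KiB cache.
 */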
2036
2037/**
2038 * Gets the number of standard CPUID leafs.
2039 *
2040 * @returns Number of leafs.
2041 * @param pVM Pointer to the VM.
2042 * @remark Intended for PATM.
2043 */
2044VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
2045{
2046 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
2047}
2048
2049
2050/**
2051 * Gets the number of extended CPUID leafs.
2052 *
2053 * @returns Number of leafs.
2054 * @param pVM Pointer to the VM.
2055 * @remark Intended for PATM.
2056 */
2057VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
2058{
2059 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
2060}
2061
2062
2063/**
2064 * Gets the number of centaur CPUID leafs.
2065 *
2066 * @returns Number of leafs.
2067 * @param pVM Pointer to the VM.
2068 * @remark Intended for PATM.
2069 */
2070VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
2071{
2072 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
2073}
2074
2075
2076/**
2077 * Sets a CPUID feature bit.
2078 *
2079 * @param pVM Pointer to the VM.
2080 * @param enmFeature The feature to set.
2081 */
2082VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
2083{
2084 PCPUMCPUIDLEAF pLeaf;
2085
2086 switch (enmFeature)
2087 {
2088 /*
2089 * Set the APIC bit in both feature masks.
2090 */
2091 case CPUMCPUIDFEATURE_APIC:
2092 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
2093 if (pLeaf)
2094 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
2095
2096 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2097 if ( pLeaf
2098 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
2099 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
2100
2101 pVM->cpum.s.GuestFeatures.fApic = 1;
2102 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n"));
2103 break;
2104
2105 /*
2106 * Set the x2APIC bit in the standard feature mask.
2107 */
2108 case CPUMCPUIDFEATURE_X2APIC:
2109 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
2110 if (pLeaf)
2111 pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
2112 pVM->cpum.s.GuestFeatures.fX2Apic = 1;
2113 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
2114 break;
2115
2116 /*
2117 * Set the sysenter/sysexit bit in the standard feature mask.
2118 * Assumes the caller knows what it's doing! (host must support these)
2119 */
2120 case CPUMCPUIDFEATURE_SEP:
2121 if (!pVM->cpum.s.HostFeatures.fSysEnter)
2122 {
2123 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
2124 return;
2125 }
2126
2127 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
2128 if (pLeaf)
2129 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
2130 pVM->cpum.s.GuestFeatures.fSysEnter = 1;
2131 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
2132 break;
2133
2134 /*
2135 * Set the syscall/sysret bit in the extended feature mask.
2136 * Assumes the caller knows what it's doing! (host must support these)
2137 */
2138 case CPUMCPUIDFEATURE_SYSCALL:
2139 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2140 if ( !pLeaf
2141 || !pVM->cpum.s.HostFeatures.fSysCall)
2142 {
2143#if HC_ARCH_BITS == 32
2144 /* Intel doesn't seem to set X86_CPUID_EXT_FEATURE_EDX_SYSCALL in
2145 32-bit mode, even when the CPU is capable of doing so in 64-bit
2146 mode. Long mode requires syscall support. */
2147 if (!pVM->cpum.s.HostFeatures.fLongMode)
2148#endif
2149 {
2150 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
2151 return;
2152 }
2153 }
2154
2155 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
2156 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
2157 pVM->cpum.s.GuestFeatures.fSysCall = 1;
2158 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
2159 break;
2160
2161 /*
2162 * Set the PAE bit in both feature masks.
2163 * Assumes the caller knows what it's doing! (host must support these)
2164 */
2165 case CPUMCPUIDFEATURE_PAE:
2166 if (!pVM->cpum.s.HostFeatures.fPae)
2167 {
2168 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
2169 return;
2170 }
2171
2172 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
2173 if (pLeaf)
2174 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
2175
2176 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2177 if ( pLeaf
2178 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
2179 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
2180
2181 pVM->cpum.s.GuestFeatures.fPae = 1;
2182 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
2183 break;
2184
2185 /*
2186 * Set the LONG MODE bit in the extended feature mask.
2187 * Assumes the caller knows what it's doing! (host must support these)
2188 */
2189 case CPUMCPUIDFEATURE_LONG_MODE:
2190 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2191 if ( !pLeaf
2192 || !pVM->cpum.s.HostFeatures.fLongMode)
2193 {
2194 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
2195 return;
2196 }
2197
2198 /* Valid for both Intel and AMD. */
2199 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
2200 pVM->cpum.s.GuestFeatures.fLongMode = 1;
2201 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
2202 break;
2203
2204 /*
2205 * Set the NX/XD bit in the extended feature mask.
2206 * Assumes the caller knows what it's doing! (host must support these)
2207 */
2208 case CPUMCPUIDFEATURE_NX:
2209 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2210 if ( !pLeaf
2211 || !pVM->cpum.s.HostFeatures.fNoExecute)
2212 {
2213 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
2214 return;
2215 }
2216
2217 /* Valid for both Intel and AMD. */
2218 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
2219 pVM->cpum.s.GuestFeatures.fNoExecute = 1;
2220 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
2221 break;
2222
2223
2224 /*
2225 * Set the LAHF/SAHF support in 64-bit mode.
2226 * Assumes the caller knows what it's doing! (host must support this)
2227 */
2228 case CPUMCPUIDFEATURE_LAHF:
2229 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2230 if ( !pLeaf
2231 || !pVM->cpum.s.HostFeatures.fLahfSahf)
2232 {
2233 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
2234 return;
2235 }
2236
2237 /* Valid for both Intel and AMD. */
2238 pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
2239 pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
2240 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
2241 break;
2242
2243 /*
2244 * Set the page attribute table bit. This is an alternative page-level
2245 * cache control that doesn't much matter when everything is
2246 * virtualized, though it may when passing thru device memory.
2247 */
2248 case CPUMCPUIDFEATURE_PAT:
2249 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
2250 if (pLeaf)
2251 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
2252
2253 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2254 if ( pLeaf
2255 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
2256 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
2257
2258 pVM->cpum.s.GuestFeatures.fPat = 1;
2259 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
2260 break;
2261
2262 /*
2263 * Set the RDTSCP support bit.
2264 * Assumes the caller knows what it's doing! (host must support this)
2265 */
2266 case CPUMCPUIDFEATURE_RDTSCP:
2267 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2268 if ( !pLeaf
2269 || !pVM->cpum.s.HostFeatures.fRdTscP
2270 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
2271 {
2272 if (!pVM->cpum.s.u8PortableCpuIdLevel)
2273 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
2274 return;
2275 }
2276
2277 /* Valid for both Intel and AMD. */
2278 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
2279 pVM->cpum.s.GuestFeatures.fRdTscP = 1;
2280 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
2281 break;
2282
2283 /*
2284 * Set the Hypervisor Present bit in the standard feature mask.
2285 */
2286 case CPUMCPUIDFEATURE_HVP:
2287 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
2288 if (pLeaf)
2289 pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
2290 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
2291 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
2292 break;
2293
2294 /*
2295 * Set the MWAIT Extensions Present bit in the MWAIT/MONITOR leaf.
2296 * This currently includes the Present bit and MWAITBREAK bit as well.
2297 */
2298 case CPUMCPUIDFEATURE_MWAIT_EXTS:
2299 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005), 0);
2300 if ( !pLeaf
2301 || !pVM->cpum.s.HostFeatures.fMWaitExtensions)
2302 {
2303 LogRel(("CPUM: WARNING! Can't turn on MWAIT Extensions when the host doesn't support it!\n"));
2304 return;
2305 }
2306
2307 /* Valid for both Intel and AMD. */
2308 pVM->cpum.s.aGuestCpuIdStd[5].ecx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
2309 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 1;
2310 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled MWAIT Extensions.\n"));
2311 break;
2312
2313 default:
2314 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2315 break;
2316 }
2317
2318 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2319 {
2320 PVMCPU pVCpu = &pVM->aCpus[i];
2321 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
2322 }
2323}
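
/*
 * Illustrative sketch (editor's addition): a hypothetical configuration
 * sequence exposing a 64-bit capable CPU to the guest.  Such calls are
 * typically issued by the VM construction/configuration code; the
 * host-capability checks above make it safe to request features the host
 * lacks, since such requests are simply refused with a release-log warning.
 *
 * @code
 *      CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
 *      CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
 *      CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
 *      CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
 *      CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
 * @endcode
 */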
2324
2325
2326/**
2327 * Queries a CPUID feature bit.
2328 *
2329 * @returns boolean for feature presence
2330 * @param pVM Pointer to the VM.
2331 * @param enmFeature The feature to query.
2332 */
2333VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
2334{
2335 switch (enmFeature)
2336 {
2337 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
2338 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
2339 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
2340 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
2341 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
2342 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
2343 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
2344 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
2345 case CPUMCPUIDFEATURE_PAT: return pVM->cpum.s.GuestFeatures.fPat;
2346 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
2347 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
2348 case CPUMCPUIDFEATURE_MWAIT_EXTS: return pVM->cpum.s.GuestFeatures.fMWaitExtensions;
2349
2350 case CPUMCPUIDFEATURE_INVALID:
2351 case CPUMCPUIDFEATURE_32BIT_HACK:
2352 break;
2353 }
2354 AssertFailed();
2355 return false;
2356}
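
/*
 * Sketch (editor's addition): combining two of the query bits above, e.g. to
 * decide whether a 64-bit guest can be run at all.  pVM is assumed valid.
 *
 * @code
 *      bool const fCan64Bit = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE)
 *                          && CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
 * @endcode
 */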
2357
2358
2359/**
2360 * Clears a CPUID feature bit.
2361 *
2362 * @param pVM Pointer to the VM.
2363 * @param enmFeature The feature to clear.
2364 */
2365VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
2366{
2367 PCPUMCPUIDLEAF pLeaf;
2368 switch (enmFeature)
2369 {
2370 case CPUMCPUIDFEATURE_APIC:
2371 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
2372 if (pLeaf)
2373 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
2374
2375 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2376 if ( pLeaf
2377 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
2378 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
2379
2380 pVM->cpum.s.GuestFeatures.fApic = 0;
2381 Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
2382 break;
2383
2384 case CPUMCPUIDFEATURE_X2APIC:
2385 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
2386 if (pLeaf)
2387 pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
2388 pVM->cpum.s.GuestFeatures.fX2Apic = 0;
2389 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
2390 break;
2391
2392 case CPUMCPUIDFEATURE_PAE:
2393 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
2394 if (pLeaf)
2395 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
2396
2397 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2398 if ( pLeaf
2399 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
2400 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
2401
2402 pVM->cpum.s.GuestFeatures.fPae = 0;
2403 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
2404 break;
2405
2406 case CPUMCPUIDFEATURE_PAT:
2407 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
2408 if (pLeaf)
2409 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
2410
2411 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2412 if ( pLeaf
2413 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
2414 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
2415
2416 pVM->cpum.s.GuestFeatures.fPat = 0;
2417 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
2418 break;
2419
2420 case CPUMCPUIDFEATURE_LONG_MODE:
2421 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2422 if (pLeaf)
2423 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
2424 pVM->cpum.s.GuestFeatures.fLongMode = 0;
2425 break;
2426
2427 case CPUMCPUIDFEATURE_LAHF:
2428 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2429 if (pLeaf)
2430 pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
2431 pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
2432 break;
2433
2434 case CPUMCPUIDFEATURE_RDTSCP:
2435 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
2436 if (pLeaf)
2437 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
2438 pVM->cpum.s.GuestFeatures.fRdTscP = 0;
2439 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
2440 break;
2441
2442 case CPUMCPUIDFEATURE_HVP:
2443 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
2444 if (pLeaf)
2445 pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
2446 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
2447 break;
2448
2449 case CPUMCPUIDFEATURE_MWAIT_EXTS:
2450 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005), 0);
2451 if (pLeaf)
2452 pVM->cpum.s.aGuestCpuIdStd[5].ecx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
2453 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 0;
2454 Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
2455 break;
2456
2457 default:
2458 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2459 break;
2460 }
2461
2462 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2463 {
2464 PVMCPU pVCpu = &pVM->aCpus[i];
2465 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
2466 }
2467}
2468
2469
2470/**
2471 * Gets the host CPU vendor.
2472 *
2473 * @returns CPU vendor.
2474 * @param pVM Pointer to the VM.
2475 */
2476VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
2477{
2478 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
2479}
2480
2481
2482/**
2483 * Gets the CPU vendor.
2484 *
2485 * @returns CPU vendor.
2486 * @param pVM Pointer to the VM.
2487 */
2488VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
2489{
2490 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
2491}
2492
2493
2494VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
2495{
2496 pVCpu->cpum.s.Guest.dr[0] = uDr0;
2497 return CPUMRecalcHyperDRx(pVCpu, 0, false);
2498}
2499
2500
2501VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
2502{
2503 pVCpu->cpum.s.Guest.dr[1] = uDr1;
2504 return CPUMRecalcHyperDRx(pVCpu, 1, false);
2505}
2506
2507
2508VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
2509{
2510 pVCpu->cpum.s.Guest.dr[2] = uDr2;
2511 return CPUMRecalcHyperDRx(pVCpu, 2, false);
2512}
2513
2514
2515VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
2516{
2517 pVCpu->cpum.s.Guest.dr[3] = uDr3;
2518 return CPUMRecalcHyperDRx(pVCpu, 3, false);
2519}
2520
2521
2522VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
2523{
2524 pVCpu->cpum.s.Guest.dr[6] = uDr6;
2525 return VINF_SUCCESS; /* No need to recalc. */
2526}
2527
2528
2529VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
2530{
2531 pVCpu->cpum.s.Guest.dr[7] = uDr7;
2532 return CPUMRecalcHyperDRx(pVCpu, 7, false);
2533}
2534
2535
2536VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
2537{
2538 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
2539 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
2540 if (iReg == 4 || iReg == 5)
2541 iReg += 2;
2542 pVCpu->cpum.s.Guest.dr[iReg] = Value;
2543 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
2544}
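
/*
 * Sketch (editor's addition): the DR4/DR5 aliasing above mirrors real
 * hardware behaviour with CR4.DE clear, so a write to "DR5" lands in the
 * DR7 slot.  pVCpu is assumed to be a valid virtual CPU pointer.
 *
 * @code
 *      CPUMSetGuestDRx(pVCpu, 5, X86_DR7_RA1_MASK);            // stored as DR7
 *      Assert(CPUMGetGuestDR7(pVCpu) == X86_DR7_RA1_MASK);
 * @endcode
 */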
2545
2546
2547/**
2548 * Recalculates the hypervisor DRx register values based on current guest
2549 * registers and DBGF breakpoints, updating changed registers depending on the
2550 * context.
2551 *
2552 * This is called whenever a guest DRx register is modified (any context) and
2553 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
2554 *
2555 * In raw-mode context this function will reload any (hyper) DRx registers that
2556 * come out with a different value. It may also have to save the host debug
2557 * registers if that hasn't been done already. In this context though, we'll
2558 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
2559 * are only important when breakpoints are actually enabled.
2560 *
2561 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
2562 * reloaded by the HM code if it changes. Furthermore, we will only use the
2563 * combined register set when the VBox debugger is actually using hardware BPs,
2564 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
2565 * concern us here).
2566 *
2567 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
2568 * all the time.
2569 *
2570 * @returns VINF_SUCCESS.
2571 * @param pVCpu Pointer to the VMCPU.
2572 * @param iGstReg The guest debug register number that was modified.
2573 * UINT8_MAX if not a guest register.
2574 * @param fForceHyper Used in HM to force hyper registers because of single
2575 * stepping.
2576 */
2577VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
2578{
2579 PVM pVM = pVCpu->CTX_SUFF(pVM);
2580
2581 /*
2582 * Compare the DR7s first.
2583 *
2584 * We only care about the enabled flags. GD is virtualized when we
2585 * dispatch the #DB; we never enable it. The DBGF DR7 value will
2586 * always have the LE and GE bits set, so no need to check and disable
2587 * stuff if they're cleared like we have to for the guest DR7.
2588 */
2589 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
2590 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
2591 uGstDr7 = 0;
2592 else if (!(uGstDr7 & X86_DR7_LE))
2593 uGstDr7 &= ~X86_DR7_LE_ALL;
2594 else if (!(uGstDr7 & X86_DR7_GE))
2595 uGstDr7 &= ~X86_DR7_GE_ALL;
2596
2597 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
2598
2599#ifdef IN_RING0
2600 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
2601 fForceHyper = true;
2602#endif
2603 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
2604 {
2605 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
2606#ifdef IN_RC
2607 bool const fHmEnabled = false;
2608#elif defined(IN_RING3)
2609 bool const fHmEnabled = HMIsEnabled(pVM);
2610#endif
2611
2612 /*
2613 * Ok, something is enabled. Recalc each of the breakpoints, taking
2614 * the VM debugger ones over the guest ones. In raw-mode context we will
2615 * not allow breakpoints with values inside the hypervisor area.
2616 */
2617 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
2618
2619 /* bp 0 */
2620 RTGCUINTREG uNewDr0;
2621 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
2622 {
2623 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2624 uNewDr0 = DBGFBpGetDR0(pVM);
2625 }
2626 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
2627 {
2628 uNewDr0 = CPUMGetGuestDR0(pVCpu);
2629#ifndef IN_RING0
2630 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
2631 uNewDr0 = 0;
2632 else
2633#endif
2634 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2635 }
2636 else
2637 uNewDr0 = 0;
2638
2639 /* bp 1 */
2640 RTGCUINTREG uNewDr1;
2641 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
2642 {
2643 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2644 uNewDr1 = DBGFBpGetDR1(pVM);
2645 }
2646 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
2647 {
2648 uNewDr1 = CPUMGetGuestDR1(pVCpu);
2649#ifndef IN_RING0
2650 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
2651 uNewDr1 = 0;
2652 else
2653#endif
2654 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2655 }
2656 else
2657 uNewDr1 = 0;
2658
2659 /* bp 2 */
2660 RTGCUINTREG uNewDr2;
2661 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
2662 {
2663 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2664 uNewDr2 = DBGFBpGetDR2(pVM);
2665 }
2666 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
2667 {
2668 uNewDr2 = CPUMGetGuestDR2(pVCpu);
2669#ifndef IN_RING0
2670 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
2671 uNewDr2 = 0;
2672 else
2673#endif
2674 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2675 }
2676 else
2677 uNewDr2 = 0;
2678
2679 /* bp 3 */
2680 RTGCUINTREG uNewDr3;
2681 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2682 {
2683 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2684 uNewDr3 = DBGFBpGetDR3(pVM);
2685 }
2686 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2687 {
2688 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2689#ifndef IN_RING0
2690 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
2691 uNewDr3 = 0;
2692 else
2693#endif
2694 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2695 }
2696 else
2697 uNewDr3 = 0;
2698
2699 /*
2700 * Apply the updates.
2701 */
2702#ifdef IN_RC
2703 /* Make sure to save host registers first. */
2704 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
2705 {
2706 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
2707 {
2708 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
2709 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
2710 }
2711 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
2712 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
2713 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
2714 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
2715 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
2716
2717 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
2718 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
2719 ASMSetDR0(uNewDr0);
2720 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
2721 ASMSetDR1(uNewDr1);
2722 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
2723 ASMSetDR2(uNewDr2);
2724 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
2725 ASMSetDR3(uNewDr3);
2726 ASMSetDR6(X86_DR6_INIT_VAL);
2727 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
2728 ASMSetDR7(uNewDr7);
2729 }
2730 else
2731#endif
2732 {
2733 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
2734 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2735 CPUMSetHyperDR3(pVCpu, uNewDr3);
2736 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2737 CPUMSetHyperDR2(pVCpu, uNewDr2);
2738 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2739 CPUMSetHyperDR1(pVCpu, uNewDr1);
2740 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2741 CPUMSetHyperDR0(pVCpu, uNewDr0);
2742 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2743 CPUMSetHyperDR7(pVCpu, uNewDr7);
2744 }
2745 }
2746#ifdef IN_RING0
2747 else if (CPUMIsGuestDebugStateActive(pVCpu))
2748 {
2749 /*
2750 * Reload the register that was modified. Normally this won't happen
2751 * as we won't intercept DRx writes when not having the hyper debug
2752 * state loaded, but in case we do for some reason we'll simply deal
2753 * with it.
2754 */
2755 switch (iGstReg)
2756 {
2757 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
2758 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
2759 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
2760 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
2761 default:
2762 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
2763 }
2764 }
2765#endif
2766 else
2767 {
2768 /*
2769 * No active debug state any more. In raw-mode this means we have to
2770 * make sure DR7 has everything disabled now, if we armed it already.
2771 * In ring-0 we might end up here when just single stepping.
2772 */
2773#if defined(IN_RC) || defined(IN_RING0)
2774 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
2775 {
2776# ifdef IN_RC
2777 ASMSetDR7(X86_DR7_INIT_VAL);
2778# endif
2779 if (pVCpu->cpum.s.Hyper.dr[0])
2780 ASMSetDR0(0);
2781 if (pVCpu->cpum.s.Hyper.dr[1])
2782 ASMSetDR1(0);
2783 if (pVCpu->cpum.s.Hyper.dr[2])
2784 ASMSetDR2(0);
2785 if (pVCpu->cpum.s.Hyper.dr[3])
2786 ASMSetDR3(0);
2787 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
2788 }
2789#endif
2790 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2791
2792 /* Clear all the registers. */
2793 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
2794 pVCpu->cpum.s.Hyper.dr[3] = 0;
2795 pVCpu->cpum.s.Hyper.dr[2] = 0;
2796 pVCpu->cpum.s.Hyper.dr[1] = 0;
2797 pVCpu->cpum.s.Hyper.dr[0] = 0;
2798
2799 }
2800 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2801 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2802 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2803 pVCpu->cpum.s.Hyper.dr[7]));
2804
2805 return VINF_SUCCESS;
2806}
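
/*
 * Sketch (editor's addition): the decision at the top of CPUMRecalcHyperDRx
 * in isolation, ignoring the fForceHyper special case and the LE/GE
 * filtering for brevity.  With HM the guest's own DR7 is handled elsewhere,
 * so only the DBGF (VM debugger) enables force the hypervisor debug state;
 * in raw mode the guest and DBGF enables are merged.
 *
 * @code
 *      RTGCUINTREG const uDbgfDr7 = DBGFBpGetDR7(pVM);
 *      RTGCUINTREG const uGstDr7  = CPUMGetGuestDR7(pVCpu);
 *      bool const fNeedHyperDRx   = RT_BOOL(  (HMIsEnabled(pVM) ? uDbgfDr7 : (uGstDr7 | uDbgfDr7))
 *                                           & X86_DR7_ENABLED_MASK);
 * @endcode
 */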
2807
2808
2809/**
2810 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2811 *
2812 * @returns true if NX is enabled, otherwise false.
2813 * @param pVCpu Pointer to the VMCPU.
2814 */
2815VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2816{
2817 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2818}
2819
2820
2821/**
2822 * Tests if the guest has the Page Size Extension enabled (PSE).
2823 *
2824 * @returns true if page size extensions are enabled, otherwise false.
2825 * @param pVCpu Pointer to the VMCPU.
2826 */
2827VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2828{
2829 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2830 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2831}
2832
2833
2834/**
2835 * Tests if the guest has paging enabled (PG).
2836 *
2837 * @returns true if paging is enabled, otherwise false.
2838 * @param pVCpu Pointer to the VMCPU.
2839 */
2840VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2841{
2842 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2843}
2844
2845
2846/**
2847 * Tests if the guest has supervisor-mode write protection enabled (CR0.WP).
2848 *
2849 * @returns true if write protection is enabled, otherwise false.
2850 * @param pVCpu Pointer to the VMCPU.
2851 */
2852VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2853{
2854 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2855}
2856
2857
2858/**
2859 * Tests if the guest is running in real mode or not.
2860 *
2861 * @returns true if in real mode, otherwise false.
2862 * @param pVCpu Pointer to the VMCPU.
2863 */
2864VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2865{
2866 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2867}
2868
2869
2870/**
2871 * Tests if the guest is running in real or virtual 8086 mode.
2872 *
2873 * @returns @c true if it is, @c false if not.
2874 * @param pVCpu Pointer to the VMCPU.
2875 */
2876VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2877{
2878 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2879 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2880}
2881
2882
2883/**
2884 * Tests if the guest is running in protected mode or not.
2885 *
2886 * @returns true if in protected mode, otherwise false.
2887 * @param pVCpu Pointer to the VMCPU.
2888 */
2889VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2890{
2891 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2892}
2893
2894
2895/**
2896 * Tests if the guest is running in paged protected mode or not.
2897 *
2898 * @returns true if in paged protected mode, otherwise false.
2899 * @param pVCpu Pointer to the VMCPU.
2900 */
2901VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2902{
2903 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2904}
2905
2906
2907/**
2908 * Tests if the guest is running in long mode or not.
2909 *
2910 * @returns true if in long mode, otherwise false.
2911 * @param pVCpu Pointer to the VMCPU.
2912 */
2913VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2914{
2915 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2916}
2917
2918
2919/**
2920 * Tests if the guest is running in PAE mode or not.
2921 *
2922 * @returns true if in PAE mode, otherwise false.
2923 * @param pVCpu Pointer to the VMCPU.
2924 */
2925VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2926{
2927 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
2928 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
2929 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2930 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2931 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2932}
2933
2934
2935/**
2936 * Tests if the guest is running in 64 bits mode or not.
2937 *
2938 * @returns true if in 64 bits protected mode, otherwise false.
2939 * @param pVCpu The current virtual CPU.
2940 */
2941VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2942{
2943 if (!CPUMIsGuestInLongMode(pVCpu))
2944 return false;
2945 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2946 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2947}
2948
2949
2950/**
2951 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2952 * registers.
2953 *
2954 * @returns true if in 64 bits protected mode, otherwise false.
2955 * @param pCtx Pointer to the current guest CPU context.
2956 */
2957VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2958{
2959 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2960}
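
/*
 * Sketch (editor's addition): how the mode predicates above relate for an
 * architecturally valid guest state.  Real mode, PAE mode and long mode are
 * mutually exclusive by construction (PAE above explicitly excludes
 * EFER.LMA), and 64-bit code additionally requires a long-mode CS.
 *
 * @code
 *      bool const fReal  = CPUMIsGuestInRealMode(pVCpu);
 *      bool const fPae   = CPUMIsGuestInPAEMode(pVCpu);
 *      bool const fLong  = CPUMIsGuestInLongMode(pVCpu);
 *      bool const f64Bit = CPUMIsGuestIn64BitCode(pVCpu);
 *      Assert(fReal + fPae + fLong <= 1);
 *      Assert(!f64Bit || fLong);
 * @endcode
 */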
2961
2962#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2963
2964/**
2965 * Tests whether the guest state is currently in the ring-compressed raw-mode form.
2966 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2967 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2968 * @param pVCpu The current virtual CPU.
2969 */
2970VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2971{
2972 return pVCpu->cpum.s.fRawEntered;
2973}
2974
2975/**
2976 * Transforms the guest CPU state to raw-ring mode.
2977 *
2978 * This function will change the cs and ss selector registers with RPL=0 to RPL=1.
2979 *
2980 * @returns VBox status. (recompiler failure)
2981 * @param pVCpu Pointer to the VMCPU.
2982 * @param pCtxCore The context core (for trap usage).
2983 * @see @ref pg_raw
2984 */
2985VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2986{
2987 PVM pVM = pVCpu->CTX_SUFF(pVM);
2988
2989 Assert(!pVCpu->cpum.s.fRawEntered);
2990 Assert(!pVCpu->cpum.s.fRemEntered);
2991 if (!pCtxCore)
2992 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
2993
2994 /*
2995 * Are we in Ring-0?
2996 */
2997 if ( pCtxCore->ss.Sel
2998 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
2999 && !pCtxCore->eflags.Bits.u1VM)
3000 {
3001 /*
3002 * Enter execution mode.
3003 */
3004 PATMRawEnter(pVM, pCtxCore);
3005
3006 /*
3007 * Set CPL to Ring-1.
3008 */
3009 pCtxCore->ss.Sel |= 1;
3010 if ( pCtxCore->cs.Sel
3011 && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
3012 pCtxCore->cs.Sel |= 1;
3013 }
3014 else
3015 {
3016# ifdef VBOX_WITH_RAW_RING1
3017 if ( EMIsRawRing1Enabled(pVM)
3018 && !pCtxCore->eflags.Bits.u1VM
3019 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
3020 {
3021 /* Set CPL to Ring-2. */
3022 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
3023 if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
3024 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
3025 }
3026# else
3027 AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
3028 ("ring-1 code not supported\n"));
3029# endif
3030 /*
3031 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
3032 */
3033 PATMRawEnter(pVM, pCtxCore);
3034 }
3035
3036 /*
3037 * Assert sanity.
3038 */
3039 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
3040 AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0,
3041 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
3042 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
3043
3044 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
3045
3046 pVCpu->cpum.s.fRawEntered = true;
3047 return VINF_SUCCESS;
3048}
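
/*
 * Illustrative sketch (editor's addition): effect of the ring compression
 * above on a guest executing kernel code with flat selectors (the selector
 * values are hypothetical).
 *
 *      before CPUMRawEnter:  cs=0008 (RPL=0)   ss=0010 (RPL=0)
 *      after CPUMRawEnter:   cs=0009 (RPL=1)   ss=0011 (RPL=1)
 *
 * CPUMRawLeave() below undoes the transformation before anything outside
 * the raw-mode execution loop sees the context.
 */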
3049
3050
3051/**
3052 * Transforms the guest CPU state from raw-ring mode to correct values.
3053 *
3054 * This function will change any selector registers with RPL=1 back to RPL=0.
3055 *
3056 * @returns Adjusted rc.
3057 * @param pVCpu Pointer to the VMCPU.
3058 * @param rc Raw mode return code
3059 * @param pCtxCore The context core (for trap usage).
3060 * @see @ref pg_raw
3061 */
3062VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
3063{
3064 PVM pVM = pVCpu->CTX_SUFF(pVM);
3065
3066 /*
3067 * Don't leave if we've already left (in RC).
3068 */
3069 Assert(!pVCpu->cpum.s.fRemEntered);
3070 if (!pVCpu->cpum.s.fRawEntered)
3071 return rc;
3072 pVCpu->cpum.s.fRawEntered = false;
3073
3074 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3075 if (!pCtxCore)
3076 pCtxCore = CPUMCTX2CORE(pCtx);
3077 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss.Sel & X86_SEL_RPL));
3078 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL),
3079 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
3080
3081 /*
3082 * Are we executing in raw ring-1?
3083 */
3084 if ( (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
3085 && !pCtxCore->eflags.Bits.u1VM)
3086 {
3087 /*
3088 * Leave execution mode.
3089 */
3090 PATMRawLeave(pVM, pCtxCore, rc);
3091 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
3092 /** @todo See what happens if we remove this. */
3093 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
3094 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
3095 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
3096 pCtxCore->es.Sel &= ~X86_SEL_RPL;
3097 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
3098 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
3099 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
3100 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
3101
3102 /*
3103 * Ring-1 selector => Ring-0.
3104 */
3105 pCtxCore->ss.Sel &= ~X86_SEL_RPL;
3106 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
3107 pCtxCore->cs.Sel &= ~X86_SEL_RPL;
3108 }
3109 else
3110 {
3111 /*
3112 * PATM is taking care of the IOPL and IF flags for us.
3113 */
3114 PATMRawLeave(pVM, pCtxCore, rc);
3115 if (!pCtxCore->eflags.Bits.u1VM)
3116 {
3117# ifdef VBOX_WITH_RAW_RING1
3118 if ( EMIsRawRing1Enabled(pVM)
3119 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)
3120 {
3121 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
3122 /** @todo See what happens if we remove this. */
3123 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 2)
3124 pCtxCore->ds.Sel = (pCtxCore->ds.Sel & ~X86_SEL_RPL) | 1;
3125 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 2)
3126 pCtxCore->es.Sel = (pCtxCore->es.Sel & ~X86_SEL_RPL) | 1;
3127 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 2)
3128 pCtxCore->fs.Sel = (pCtxCore->fs.Sel & ~X86_SEL_RPL) | 1;
3129 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 2)
3130 pCtxCore->gs.Sel = (pCtxCore->gs.Sel & ~X86_SEL_RPL) | 1;
3131
3132 /*
3133 * Ring-2 selector => Ring-1.
3134 */
3135 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 1;
3136 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 2)
3137 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 1;
3138 }
3139 else
3140 {
3141# endif
3142 /** @todo See what happens if we remove this. */
3143 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
3144 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
3145 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
3146 pCtxCore->es.Sel &= ~X86_SEL_RPL;
3147 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
3148 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
3149 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
3150 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
3151# ifdef VBOX_WITH_RAW_RING1
3152 }
3153# endif
3154 }
3155 }
3156
3157 return rc;
3158}
3159
3160#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3161
3162/**
3163 * Updates the EFLAGS while we're in raw-mode.
3164 *
3165 * @param pVCpu Pointer to the VMCPU.
3166 * @param fEfl The new EFLAGS value.
3167 */
3168VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
3169{
3170#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3171 if (pVCpu->cpum.s.fRawEntered)
3172 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest), fEfl);
3173 else
3174#endif
3175 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
3176}
3177
3178
3179/**
3180 * Gets the EFLAGS while we're in raw-mode.
3181 *
3182 * @returns The eflags.
3183 * @param pVCpu Pointer to the current virtual CPU.
3184 */
3185VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
3186{
3187#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3188 if (pVCpu->cpum.s.fRawEntered)
3189 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest));
3190#endif
3191 return pVCpu->cpum.s.Guest.eflags.u32;
3192}
3193
3194
3195/**
3196 * Sets the specified changed flags (CPUM_CHANGED_*).
3197 *
3198 * @param pVCpu Pointer to the current virtual CPU.
3199 */
3200VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
3201{
3202 pVCpu->cpum.s.fChanged |= fChangedFlags;
3203}
3204
3205
3206/**
3207 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
3208 * @returns true if supported.
3209 * @returns false if not supported.
3210 * @param pVM Pointer to the VM.
3211 */
3212VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
3213{
3214 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
3215}
3216
3217
3218/**
3219 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
3220 * @returns true if used.
3221 * @returns false if not used.
3222 * @param pVM Pointer to the VM.
3223 */
3224VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
3225{
3226 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
3227}
3228
3229
3230/**
3231 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
3232 * @returns true if used.
3233 * @returns false if not used.
3234 * @param pVM Pointer to the VM.
3235 */
3236VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
3237{
3238 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
3239}
3240
3241#ifdef IN_RC
3242
3243/**
3244 * Lazily sync in the FPU/XMM state.
3245 *
3246 * @returns VBox status code.
3247 * @param pVCpu Pointer to the VMCPU.
3248 */
3249VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
3250{
3251 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
3252}
3253
3254#endif /* IN_RC */
3255
3256/**
3257 * Checks if we activated the FPU/XMM state of the guest OS.
3258 * @returns true if we did.
3259 * @returns false if not.
3260 * @param pVCpu Pointer to the VMCPU.
3261 */
3262VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
3263{
3264 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
3265}
3266
3267
3268/**
3269 * Deactivate the FPU/XMM state of the guest OS.
3270 * @param pVCpu Pointer to the VMCPU.
3271 *
3272 * @todo r=bird: Why is this needed? Looks like a workaround for mishandled
3273 * FPU state management.
3274 */
3275VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
3276{
3277 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU));
3278 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
3279}
3280
3281
3282/**
3283 * Checks if the guest debug state is active.
3284 *
3285 * @returns boolean
3286 * @param pVCpu Pointer to the VMCPU.
3287 */
3288VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
3289{
3290 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
3291}
3292
3293
3294/**
3295 * Checks if the guest debug state is to be made active during the world-switch
3296 * (currently only used for the 32->64 switcher case).
3297 *
3298 * @returns boolean
3299 * @param pVCpu Pointer to the VMCPU.
3300 */
3301VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
3302{
3303 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
3304}
3305
3306
3307/**
3308 * Checks if the hyper debug state is active.
3309 *
3310 * @returns boolean
3311 * @param pVCpu Pointer to the VMCPU.
3312 */
3313VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
3314{
3315 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
3316}
3317
3318
3319/**
3320 * Checks if the hyper debug state is to be made active during the world-switch
3321 * (currently only used for the 32->64 switcher case).
3322 *
3323 * @returns boolean
3324 * @param pVCpu Pointer to the VMCPU.
3325 */
3326VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
3327{
3328 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
3329}
3330
3331
3332/**
3333 * Mark the guest's debug state as inactive.
3334 *
3335 * @param pVCpu Pointer to the VMCPU.
3337 * @todo This API doesn't make sense any more.
3338 */
3339VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
3340{
3341 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
3342}
3343
3344
3345/**
3346 * Get the current privilege level of the guest.
3347 *
3348 * @returns CPL
3349 * @param pVCpu Pointer to the current virtual CPU.
3350 */
3351VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
3352{
3353 /*
3354 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
3355 *
3356 * Note! We used to check CS.DPL here, assuming it was always equal to
3357 * CPL even if a conforming segment was loaded. But this turned out to
3358 * only apply to older AMD-V. With VT-x we had an ACP2 regression
3359 * during install after a far call to ring 2 with VT-x. Then on newer
3360 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
3361 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
3362 *
3363 * So, forget CS.DPL, always use SS.DPL.
3364 *
3365 * Note! The SS RPL is always equal to the CPL, while the CS RPL
3366 * isn't necessarily equal if the segment is conforming.
3367 * See section 4.11.1 in the AMD manual.
3368 *
3369 * Update: Where the heck does it say CS.RPL can differ from CPL other than
3370 * right after real->prot mode switch and when in V8086 mode? That
3371 * section says the RPL specified in a direct transfer (call, jmp,
3372 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
3373 * it would be impossible for an exception handler or the iret
3374 * instruction to figure out whether SS:ESP are part of the frame
3375 * or not. VBox or qemu bug must've led to this misconception.
3376 *
3377 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
3378 * selector into SS with an RPL other than the CPL when CPL != 3 and
3379 * we're in 64-bit mode. The Intel dev box doesn't allow this; it insists
3380 * on RPL = CPL. Weird.
3381 */
3382 uint32_t uCpl;
3383 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
3384 {
3385 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3386 {
3387 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
3388 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
3389 else
3390 {
3391 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
3392#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3393# ifdef VBOX_WITH_RAW_RING1
3394 if (pVCpu->cpum.s.fRawEntered)
3395 {
3396 if ( uCpl == 2
3397 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
3398 uCpl = 1;
3399 else if (uCpl == 1)
3400 uCpl = 0;
3401 }
3402 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
3403# else
3404 if (uCpl == 1)
3405 uCpl = 0;
3406# endif
3407#endif
3408 }
3409 }
3410 else
3411 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
3412 }
3413 else
3414 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
3415 return uCpl;
3416}
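
/*
 * Sketch (editor's addition): a typical consumer of the CPL logic above,
 * e.g. an emulation helper that only services supervisor code.  The status
 * code policy shown is hypothetical; the point is that callers rely on the
 * SS.DPL based CPL, never on CS.RPL.
 *
 * @code
 *      if (CPUMGetGuestCPL(pVCpu) != 0)
 *          return VERR_EM_INTERPRETER;     // hypothetical: defer to the interpreter
 * @endcode
 */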
3417
3418
3419/**
3420 * Gets the current guest CPU mode.
3421 *
3422 * If paging mode is what you need, check out PGMGetGuestMode().
3423 *
3424 * @returns The CPU mode.
3425 * @param pVCpu Pointer to the VMCPU.
3426 */
3427VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
3428{
3429 CPUMMODE enmMode;
3430 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3431 enmMode = CPUMMODE_REAL;
3432 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3433 enmMode = CPUMMODE_PROTECTED;
3434 else
3435 enmMode = CPUMMODE_LONG;
3436
3437 return enmMode;
3438}
3439
3440
3441/**
3442 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
3443 *
3444 * @returns 16, 32 or 64.
3445 * @param pVCpu The current virtual CPU.
3446 */
3447VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
3448{
3449 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3450 return 16;
3451
3452 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3453 {
3454 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
3455 return 16;
3456 }
3457
3458 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
3459 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
3460 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3461 return 64;
3462
3463 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
3464 return 32;
3465
3466 return 16;
3467}
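
/*
 * Decision order above, summarised (editor's addition):
 *
 *      CR0.PE clear                    -> 16-bit (real mode)
 *      EFLAGS.VM set                   -> 16-bit (V8086)
 *      CS.L set and EFER.LMA set       -> 64-bit
 *      CS.D set                        -> 32-bit
 *      otherwise                       -> 16-bit
 *
 * CPUMGetGuestDisMode() below maps the same decisions onto DISCPUMODE
 * values for the disassembler.
 */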
3468
3469
3470VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
3471{
3472 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3473 return DISCPUMODE_16BIT;
3474
3475 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3476 {
3477 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
3478 return DISCPUMODE_16BIT;
3479 }
3480
3481 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
3482 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
3483 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3484 return DISCPUMODE_64BIT;
3485
3486 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
3487 return DISCPUMODE_32BIT;
3488
3489 return DISCPUMODE_16BIT;
3490}
3491