VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@49764

Last change on this file since 49764 was 49549, checked in by vboxsync, 11 years ago

VMM/CPUM/MSRs: when returning the APIC base, don't rely upon the CPUID feature bits but check if a Local APIC is present

1/* $Id: CPUMAllRegs.cpp 49549 2013-11-19 13:28:01Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Force stack frame pointer generation here (i.e. disable MSVC's frame pointer omission optimization). */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52
53/*******************************************************************************
54* Defined Constants And Macros *
55*******************************************************************************/
56/**
57 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
58 *
59 * @returns Pointer to the Virtual CPU.
60 * @param a_pGuestCtx Pointer to the guest context.
61 */
62#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
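/* Rough illustration of what the macro above does: RT_FROM_MEMBER is IPRT's
 * container-of construct, so the conversion is approximately the following
 * (sketch only, assuming the offset macros from iprt/cdefs.h): */
#if 0 /* illustration only */
static PVMCPU cpumExampleGuestCtxToVMCpu(PCPUMCTX pGuestCtx)
{
    /* Subtract the offset of cpum.s.Guest within VMCPU from the member pointer. */
    return (PVMCPU)((uintptr_t)pGuestCtx - RT_OFFSETOF(VMCPU, cpum.s.Guest));
}
#endif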
63
64/**
65 * Lazily loads the hidden parts of a selector register when using raw-mode.
66 */
67#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
68# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
69 do \
70 { \
71 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
72 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
73 } while (0)
74#else
75# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
76 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
77#endif
78
79
80
81#ifdef VBOX_WITH_RAW_MODE_NOT_R0
82
83/**
84 * Does the lazy hidden selector register loading.
85 *
86 * @param pVCpu The current Virtual CPU.
87 * @param pSReg The selector register to lazily load hidden parts of.
88 */
89static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
90{
91 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
92 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
93 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
94
95 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
96 {
97 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
98 pSReg->Attr.u = 0;
99 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
100 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
101 pSReg->Attr.n.u2Dpl = 3;
102 pSReg->Attr.n.u1Present = 1;
103 pSReg->u32Limit = 0x0000ffff;
104 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
105 pSReg->ValidSel = pSReg->Sel;
106 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
107 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
108 }
109 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
110 {
111 /* Real mode - leave the limit and flags alone here, at least for now. */
112 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
113 pSReg->ValidSel = pSReg->Sel;
114 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
115 }
116 else
117 {
118 /* Protected mode - get it from the selector descriptor tables. */
119 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
120 {
121 Assert(!CPUMIsGuestInLongMode(pVCpu));
122 pSReg->Sel = 0;
123 pSReg->u64Base = 0;
124 pSReg->u32Limit = 0;
125 pSReg->Attr.u = 0;
126 pSReg->ValidSel = 0;
127 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
128 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
129 }
130 else
131 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
132 }
133}
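/* Worked example for the real-mode/V8086 paths above: the hidden base is simply the
 * selector shifted left by four, so a selector of 0x1234 yields a base of 0x12340;
 * in V8086 mode the limit is additionally forced to 0xffff (a 64 KiB segment). */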
134
135
136/**
137 * Makes sure the hidden CS and SS selector registers are valid, loading them if
138 * necessary.
139 *
140 * @param pVCpu The current virtual CPU.
141 */
142VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
143{
144 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
145 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
146}
147
148
149/**
150 * Loads the hidden parts of a selector register.
151 *
152 * @param pVCpu The current virtual CPU.
153 */
154VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
155{
156 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
157}
158
159#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
160
161
162/**
163 * Obsolete.
164 *
165 * We don't support nested hypervisor context interrupts or traps. Life is much
166 * simpler when we don't. It's also slightly faster at times.
167 *
168 * @param pVCpu Pointer to the VMCPU.
169 */
170VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
171{
172 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
173}
174
175
176/**
177 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
178 *
179 * @param pVCpu Pointer to the VMCPU.
180 */
181VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
182{
183 return &pVCpu->cpum.s.Hyper;
184}
185
186
187VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
188{
189 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
190 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
191}
192
193
194VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
195{
196 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
197 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
198}
199
200
201VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
202{
203 pVCpu->cpum.s.Hyper.cr3 = cr3;
204
205#ifdef IN_RC
206 /* Update the current CR3. */
207 ASMSetCR3(cr3);
208#endif
209}
210
211VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
212{
213 return pVCpu->cpum.s.Hyper.cr3;
214}
215
216
217VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
218{
219 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
220}
221
222
223VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
224{
225 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
226}
227
228
229VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
230{
231 pVCpu->cpum.s.Hyper.es.Sel = SelES;
232}
233
234
235VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
236{
237 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
238}
239
240
241VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
242{
243 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
244}
245
246
247VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
248{
249 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
250}
251
252
253VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
254{
255 pVCpu->cpum.s.Hyper.esp = u32ESP;
256}
257
258
259VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
260{
261 pVCpu->cpum.s.Hyper.edx = u32EDX;
262}
263
264
265VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
266{
267 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
268 return VINF_SUCCESS;
269}
270
271
272VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
273{
274 pVCpu->cpum.s.Hyper.eip = u32EIP;
275}
276
277
278/**
279 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
280 * EFLAGS and EIP prior to resuming guest execution.
281 *
282 * All general registers not given as parameters will be set to 0. The EFLAGS
283 * register will be set to sane values for C/C++ code execution with interrupts
284 * disabled and IOPL 0.
285 *
286 * @param pVCpu The current virtual CPU.
287 * @param u32EIP The EIP value.
288 * @param u32ESP The ESP value.
289 * @param u32EAX The EAX value.
290 * @param u32EDX The EDX value.
291 */
292VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
293{
294 pVCpu->cpum.s.Hyper.eip = u32EIP;
295 pVCpu->cpum.s.Hyper.esp = u32ESP;
296 pVCpu->cpum.s.Hyper.eax = u32EAX;
297 pVCpu->cpum.s.Hyper.edx = u32EDX;
298 pVCpu->cpum.s.Hyper.ecx = 0;
299 pVCpu->cpum.s.Hyper.ebx = 0;
300 pVCpu->cpum.s.Hyper.ebp = 0;
301 pVCpu->cpum.s.Hyper.esi = 0;
302 pVCpu->cpum.s.Hyper.edi = 0;
303 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
304}
305
306
307VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
308{
309 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
310}
311
312
313VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
314{
315 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
316}
317
318
319/** @def MAYBE_LOAD_DRx
320 * Macro for updating DRx values in raw-mode and ring-0 contexts.
321 */
322#ifdef IN_RING0
323# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
324# ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
325# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
326 do { \
327 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
328 a_fnLoad(a_uValue); \
329 else \
330 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
331 } while (0)
332# else
333# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
334 do { \
335 /** @todo we're not loading the correct guest value here! */ \
336 a_fnLoad(a_uValue); \
337 } while (0)
338# endif
339# else
340# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
341 do { \
342 a_fnLoad(a_uValue); \
343 } while (0)
344# endif
345
346#elif defined(IN_RC)
347# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
348 do { \
349 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
350 { a_fnLoad(a_uValue); } \
351 } while (0)
352
353#else
354# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
355#endif
356
357VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
358{
359 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
360 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
361}
362
363
364VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
365{
366 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
367 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
368}
369
370
371VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
372{
373 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
374 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
375}
376
377
378VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
379{
380 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
381 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
382}
383
384
385VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
386{
387 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
388}
389
390
391VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
392{
393 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
394#ifdef IN_RC
395 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
396#endif
397}
398
399
400VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
401{
402 return pVCpu->cpum.s.Hyper.cs.Sel;
403}
404
405
406VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
407{
408 return pVCpu->cpum.s.Hyper.ds.Sel;
409}
410
411
412VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
413{
414 return pVCpu->cpum.s.Hyper.es.Sel;
415}
416
417
418VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
419{
420 return pVCpu->cpum.s.Hyper.fs.Sel;
421}
422
423
424VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
425{
426 return pVCpu->cpum.s.Hyper.gs.Sel;
427}
428
429
430VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
431{
432 return pVCpu->cpum.s.Hyper.ss.Sel;
433}
434
435
436VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
437{
438 return pVCpu->cpum.s.Hyper.eax;
439}
440
441
442VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
443{
444 return pVCpu->cpum.s.Hyper.ebx;
445}
446
447
448VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
449{
450 return pVCpu->cpum.s.Hyper.ecx;
451}
452
453
454VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
455{
456 return pVCpu->cpum.s.Hyper.edx;
457}
458
459
460VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
461{
462 return pVCpu->cpum.s.Hyper.esi;
463}
464
465
466VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
467{
468 return pVCpu->cpum.s.Hyper.edi;
469}
470
471
472VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
473{
474 return pVCpu->cpum.s.Hyper.ebp;
475}
476
477
478VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
479{
480 return pVCpu->cpum.s.Hyper.esp;
481}
482
483
484VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
485{
486 return pVCpu->cpum.s.Hyper.eflags.u32;
487}
488
489
490VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
491{
492 return pVCpu->cpum.s.Hyper.eip;
493}
494
495
496VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
497{
498 return pVCpu->cpum.s.Hyper.rip;
499}
500
501
502VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
503{
504 if (pcbLimit)
505 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
506 return pVCpu->cpum.s.Hyper.idtr.pIdt;
507}
508
509
510VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
511{
512 if (pcbLimit)
513 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
514 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
515}
516
517
518VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
519{
520 return pVCpu->cpum.s.Hyper.ldtr.Sel;
521}
522
523
524VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
525{
526 return pVCpu->cpum.s.Hyper.dr[0];
527}
528
529
530VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
531{
532 return pVCpu->cpum.s.Hyper.dr[1];
533}
534
535
536VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
537{
538 return pVCpu->cpum.s.Hyper.dr[2];
539}
540
541
542VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
543{
544 return pVCpu->cpum.s.Hyper.dr[3];
545}
546
547
548VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
549{
550 return pVCpu->cpum.s.Hyper.dr[6];
551}
552
553
554VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
555{
556 return pVCpu->cpum.s.Hyper.dr[7];
557}
558
559
560/**
561 * Gets the pointer to the internal CPUMCTXCORE structure.
562 * This is only for reading in order to save a few calls.
563 *
564 * @param pVCpu Handle to the virtual cpu.
565 */
566VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
567{
568 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
569}
570
571
572/**
573 * Queries the pointer to the internal CPUMCTX structure.
574 *
575 * @returns The CPUMCTX pointer.
576 * @param pVCpu Handle to the virtual cpu.
577 */
578VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
579{
580 return &pVCpu->cpum.s.Guest;
581}
582
583VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
584{
585#ifdef VBOX_WITH_IEM
586# ifdef VBOX_WITH_RAW_MODE_NOT_R0
587 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
588 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
589# endif
590#endif
591 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
592 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
593 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
594 return VINF_SUCCESS; /* formality, consider it void. */
595}
596
597VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
598{
599#ifdef VBOX_WITH_IEM
600# ifdef VBOX_WITH_RAW_MODE_NOT_R0
601 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
602 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
603# endif
604#endif
605 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
606 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
607 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
608 return VINF_SUCCESS; /* formality, consider it void. */
609}
610
611VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
612{
613#ifdef VBOX_WITH_IEM
614# ifdef VBOX_WITH_RAW_MODE_NOT_R0
615 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
616 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
617# endif
618#endif
619 pVCpu->cpum.s.Guest.tr.Sel = tr;
620 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
621 return VINF_SUCCESS; /* formality, consider it void. */
622}
623
624VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
625{
626#ifdef VBOX_WITH_IEM
627# ifdef VBOX_WITH_RAW_MODE_NOT_R0
628 if ( ( ldtr != 0
629 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
630 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
631 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
632# endif
633#endif
634 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
635 /* The caller will set more hidden bits if it has them. */
636 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
637 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
638 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
639 return VINF_SUCCESS; /* formality, consider it void. */
640}
641
642
643/**
644 * Set the guest CR0.
645 *
646 * When called in GC, the hyper CR0 may be updated if that is
647 * required. The caller only has to take special action if AM,
648 * WP, PG or PE changes.
649 *
650 * @returns VINF_SUCCESS (consider it void).
651 * @param pVCpu Handle to the virtual cpu.
652 * @param cr0 The new CR0 value.
653 */
654VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
655{
656#ifdef IN_RC
657 /*
658 * Check if we need to change hypervisor CR0 because
659 * of math stuff.
660 */
661 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
662 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
663 {
664 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
665 {
666 /*
667 * We haven't saved the host FPU state yet, so TS and MP are both set
668 * and EM should be reflecting the guest EM (it always does this).
669 */
670 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
671 {
672 uint32_t HyperCR0 = ASMGetCR0();
673 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
674 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
675 HyperCR0 &= ~X86_CR0_EM;
676 HyperCR0 |= cr0 & X86_CR0_EM;
677 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
678 ASMSetCR0(HyperCR0);
679 }
680# ifdef VBOX_STRICT
681 else
682 {
683 uint32_t HyperCR0 = ASMGetCR0();
684 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
685 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
686 }
687# endif
688 }
689 else
690 {
691 /*
692 * Already saved the state, so we're just mirroring
693 * the guest flags.
694 */
695 uint32_t HyperCR0 = ASMGetCR0();
696 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
697 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
698 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
699 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
700 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
701 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
702 ASMSetCR0(HyperCR0);
703 }
704 }
705#endif /* IN_RC */
706
707 /*
708 * Check for changes causing TLB flushes (for REM).
709 * The caller is responsible for calling PGM when appropriate.
710 */
711 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
712 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
713 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
714 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
715
716 /*
717 * Let PGM know if the WP goes from 0 to 1 (NetWare WP0+RO+US hack)
718 */
719 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
720 PGMCr0WpEnabled(pVCpu);
721
722 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
723 return VINF_SUCCESS;
724}
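/* A minimal caller-side sketch of the contract documented above (illustration only;
 * uNewCr0 and the follow-up are hypothetical): after changing CR0 the caller has to
 * react itself when any of AM, WP, PG or PE changed, e.g. by resyncing paging. */
#if 0 /* illustration only */
static void cpumExampleChangeCr0(PVMCPU pVCpu, uint64_t uNewCr0)
{
    uint64_t const uOldCr0 = CPUMGetGuestCR0(pVCpu);
    CPUMSetGuestCR0(pVCpu, uNewCr0);
    if ((uOldCr0 ^ uNewCr0) & (X86_CR0_AM | X86_CR0_WP | X86_CR0_PG | X86_CR0_PE))
    {
        /* Mode or paging related bits changed -> caller specific follow-up goes here. */
    }
}
#endif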
725
726
727VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
728{
729 pVCpu->cpum.s.Guest.cr2 = cr2;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
735{
736 pVCpu->cpum.s.Guest.cr3 = cr3;
737 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
738 return VINF_SUCCESS;
739}
740
741
742VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
743{
744 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
745 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
746 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
747 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
748 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
749 cr4 &= ~X86_CR4_OSFSXR;
750 pVCpu->cpum.s.Guest.cr4 = cr4;
751 return VINF_SUCCESS;
752}
753
754
755VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
756{
757 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
758 return VINF_SUCCESS;
759}
760
761
762VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
763{
764 pVCpu->cpum.s.Guest.eip = eip;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
770{
771 pVCpu->cpum.s.Guest.eax = eax;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
777{
778 pVCpu->cpum.s.Guest.ebx = ebx;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
784{
785 pVCpu->cpum.s.Guest.ecx = ecx;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
791{
792 pVCpu->cpum.s.Guest.edx = edx;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
798{
799 pVCpu->cpum.s.Guest.esp = esp;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
805{
806 pVCpu->cpum.s.Guest.ebp = ebp;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
812{
813 pVCpu->cpum.s.Guest.esi = esi;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
819{
820 pVCpu->cpum.s.Guest.edi = edi;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
826{
827 pVCpu->cpum.s.Guest.ss.Sel = ss;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
833{
834 pVCpu->cpum.s.Guest.cs.Sel = cs;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
840{
841 pVCpu->cpum.s.Guest.ds.Sel = ds;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
847{
848 pVCpu->cpum.s.Guest.es.Sel = es;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
854{
855 pVCpu->cpum.s.Guest.fs.Sel = fs;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
861{
862 pVCpu->cpum.s.Guest.gs.Sel = gs;
863 return VINF_SUCCESS;
864}
865
866
867VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
868{
869 pVCpu->cpum.s.Guest.msrEFER = val;
870}
871
872
873/**
874 * Worker for CPUMQueryGuestMsr().
875 *
876 * @retval VINF_SUCCESS
877 * @retval VERR_CPUM_RAISE_GP_0
878 * @param pVCpu The cross context CPU structure.
879 * @param idMsr The MSR to read.
880 * @param puValue Where to store the return value.
881 */
882static int cpumQueryGuestMsrInt(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
883{
884 /*
885 * If we don't indicate MSR support in the CPUID feature bits, indicate
886 * that a #GP(0) should be raised.
887 */
888 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
889 {
890 *puValue = 0;
891 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
892 }
893
894 int rc = VINF_SUCCESS;
895 uint8_t const u8Multiplier = 4;
896 switch (idMsr)
897 {
898 case MSR_IA32_TSC:
899 *puValue = TMCpuTickGet(pVCpu);
900 break;
901
902 case MSR_IA32_APICBASE:
903 {
904 /* See @bugref{7097} comment 6. */
905 PVM pVM = pVCpu->CTX_SUFF(pVM);
906 if (PDMHasApic(pVM))
907 *puValue = pVCpu->cpum.s.Guest.msrApicBase;
908 else
909 {
910 rc = VERR_CPUM_RAISE_GP_0;
911 *puValue = 0;
912 }
913 break;
914 }
915
916 case MSR_IA32_CR_PAT:
917 *puValue = pVCpu->cpum.s.Guest.msrPAT;
918 break;
919
920 case MSR_IA32_SYSENTER_CS:
921 *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
922 break;
923
924 case MSR_IA32_SYSENTER_EIP:
925 *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
926 break;
927
928 case MSR_IA32_SYSENTER_ESP:
929 *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
930 break;
931
932 case MSR_IA32_MTRR_CAP:
933 {
934 /* This is currently a bit weird. :-) */
935 uint8_t const cVariableRangeRegs = 0;
936 bool const fSystemManagementRangeRegisters = false;
937 bool const fFixedRangeRegisters = false;
938 bool const fWriteCombiningType = false;
939 *puValue = cVariableRangeRegs
940 | (fFixedRangeRegisters ? RT_BIT_64(8) : 0)
941 | (fWriteCombiningType ? RT_BIT_64(10) : 0)
942 | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
943 break;
944 }
945
946 case IA32_MTRR_PHYSBASE0: case IA32_MTRR_PHYSMASK0:
947 case IA32_MTRR_PHYSBASE1: case IA32_MTRR_PHYSMASK1:
948 case IA32_MTRR_PHYSBASE2: case IA32_MTRR_PHYSMASK2:
949 case IA32_MTRR_PHYSBASE3: case IA32_MTRR_PHYSMASK3:
950 case IA32_MTRR_PHYSBASE4: case IA32_MTRR_PHYSMASK4:
951 case IA32_MTRR_PHYSBASE5: case IA32_MTRR_PHYSMASK5:
952 case IA32_MTRR_PHYSBASE6: case IA32_MTRR_PHYSMASK6:
953 case IA32_MTRR_PHYSBASE7: case IA32_MTRR_PHYSMASK7:
954 /** @todo implement variable MTRRs. */
955 *puValue = 0;
956 break;
957#if 0 /** @todo newer CPUs have more, figure since when and do selective GP(). */
958 case IA32_MTRR_PHYSBASE8: case IA32_MTRR_PHYSMASK8:
959 case IA32_MTRR_PHYSBASE9: case IA32_MTRR_PHYSMASK9:
960 *puValue = 0;
961 break;
962#endif
963
964 case MSR_IA32_MTRR_DEF_TYPE:
965 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
966 break;
967
968 case IA32_MTRR_FIX64K_00000:
969 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000;
970 break;
971 case IA32_MTRR_FIX16K_80000:
972 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000;
973 break;
974 case IA32_MTRR_FIX16K_A0000:
975 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000;
976 break;
977 case IA32_MTRR_FIX4K_C0000:
978 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000;
979 break;
980 case IA32_MTRR_FIX4K_C8000:
981 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000;
982 break;
983 case IA32_MTRR_FIX4K_D0000:
984 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000;
985 break;
986 case IA32_MTRR_FIX4K_D8000:
987 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000;
988 break;
989 case IA32_MTRR_FIX4K_E0000:
990 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000;
991 break;
992 case IA32_MTRR_FIX4K_E8000:
993 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000;
994 break;
995 case IA32_MTRR_FIX4K_F0000:
996 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000;
997 break;
998 case IA32_MTRR_FIX4K_F8000:
999 *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000;
1000 break;
1001
1002 case MSR_K6_EFER:
1003 *puValue = pVCpu->cpum.s.Guest.msrEFER;
1004 break;
1005
1006 case MSR_K8_SF_MASK:
1007 *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
1008 break;
1009
1010 case MSR_K6_STAR:
1011 *puValue = pVCpu->cpum.s.Guest.msrSTAR;
1012 break;
1013
1014 case MSR_K8_LSTAR:
1015 *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
1016 break;
1017
1018 case MSR_K8_CSTAR:
1019 *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
1020 break;
1021
1022 case MSR_K8_FS_BASE:
1023 *puValue = pVCpu->cpum.s.Guest.fs.u64Base;
1024 break;
1025
1026 case MSR_K8_GS_BASE:
1027 *puValue = pVCpu->cpum.s.Guest.gs.u64Base;
1028 break;
1029
1030 case MSR_K8_KERNEL_GS_BASE:
1031 *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
1032 break;
1033
1034 case MSR_K8_TSC_AUX:
1035 *puValue = pVCpu->cpum.s.GuestMsrs.msr.TscAux;
1036 break;
1037
1038 case MSR_IA32_PERF_STATUS:
1039 /** @todo This may not be exactly correct; maybe use the host's values instead.
1040 * Apple code indicates that we should use CPU Hz / 1.333MHz here. */
1041 /** @todo Where are the specs that this implements to be found? */
1042 *puValue = UINT64_C(1000) /* TSC increment by tick */
1043 | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
1044 | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
1045 break;
1046
1047 case MSR_IA32_FSB_CLOCK_STS:
1048 /*
1049 * Encoded as:
1050 * 0 - 266 MHz
1051 * 1 - 133 MHz
1052 * 2 - 200 MHz
1053 * 3 - 166 MHz
1054 * 5 - 100 MHz
1055 */
1056 *puValue = (2 << 4);
1057 break;
1058
1059 case MSR_IA32_PLATFORM_INFO:
1060 *puValue = ((uint32_t)u8Multiplier << 8) /* Flex ratio max */
1061 | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
1062 break;
1063
1064 case MSR_IA32_THERM_STATUS:
1065 /* CPU temperature relative to TCC; to actually activate this, CPUID leaf 6 EAX[0] must be set. */
1066 *puValue = RT_BIT(31) /* validity bit */
1067 | (UINT64_C(20) << 16) /* degrees till TCC */;
1068 break;
1069
1070 case MSR_IA32_MISC_ENABLE:
1071#if 0
1072 /* Needs to be tested more before enabling. */
1073 *puValue = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
1074#else
1075 /* Currently we don't allow guests to modify these enable MSRs. */
1076 *puValue = MSR_IA32_MISC_ENABLE_FAST_STRINGS /* by default */;
1077
1078 if ((pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR) != 0)
1079
1080 *puValue |= MSR_IA32_MISC_ENABLE_MONITOR /* if mwait/monitor available */;
1081 /** @todo: add more cpuid-controlled features this way. */
1082#endif
1083 break;
1084
1085 /** @todo virtualize DEBUGCTL and relatives */
1086 case MSR_IA32_DEBUGCTL:
1087 *puValue = 0;
1088 break;
1089
1090#if 0 /*def IN_RING0 */
1091 case MSR_IA32_PLATFORM_ID:
1092 case MSR_IA32_BIOS_SIGN_ID:
1093 if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
1094 {
1095 /* Available since the P6 family. VT-x implies that this feature is present. */
1096 if (idMsr == MSR_IA32_PLATFORM_ID)
1097 *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
1098 else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
1099 *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
1100 break;
1101 }
1102 /* no break */
1103#endif
1104 /*
1105 * The BIOS_SIGN_ID MSR and MSR_IA32_MCP_CAP et al exist on AMD64 as
1106 * well; at least Bulldozer has them. Windows 7 queries them.
1107 * XP has been observed querying MSR_IA32_MC0_CTL.
1108 * XP64 has been observed querying MSR_P4_LASTBRANCH_0 (also on AMD).
1109 */
1110 case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */
1111 case MSR_IA32_MCG_CAP: /* fam/mod >= 6_01 */
1112 case MSR_IA32_MCG_STATUS: /* indicated as not present in CAP */
1113 /*case MSR_IA32_MCG_CTRL: - indicated as not present in CAP */
1114 case MSR_IA32_MC0_CTL:
1115 case MSR_IA32_MC0_STATUS:
1116 case MSR_P4_LASTBRANCH_0:
1117 case MSR_P4_LASTBRANCH_1:
1118 case MSR_P4_LASTBRANCH_2:
1119 case MSR_P4_LASTBRANCH_3:
1120 *puValue = 0;
1121 break;
1122
1123
1124 /*
1125 * Intel-specific MSRs:
1126 */
1127 case MSR_P5_MC_ADDR:
1128 case MSR_P5_MC_TYPE:
1129 case MSR_P4_LASTBRANCH_TOS: /** @todo Are these branch regs still here on more recent CPUs? The documentation doesn't mention them for several archs. */
1130 case MSR_IA32_PERFEVTSEL0: /* NetWare 6.5 wants these four. (Bet on AMD as well.) */
1131 case MSR_IA32_PERFEVTSEL1:
1132 case MSR_IA32_PMC0:
1133 case MSR_IA32_PMC1:
1134 case MSR_IA32_PLATFORM_ID: /* fam/mod >= 6_01 */
1135 case MSR_IA32_MPERF: /* intel_pstate depends on this but does a validation test */
1136 case MSR_IA32_APERF: /* intel_pstate depends on this but does a validation test */
1137 /*case MSR_IA32_BIOS_UPDT_TRIG: - write-only? */
1138 case MSR_RAPL_POWER_UNIT:
1139 case MSR_BBL_CR_CTL3: /* ca. core arch? */
1140 case MSR_PKG_CST_CONFIG_CONTROL: /* Nehalem, Sandy Bridge */
1141 case MSR_CORE_THREAD_COUNT: /* Apple queries this. */
1142 case MSR_FLEX_RATIO: /* Apple queries this. */
1143 *puValue = 0;
1144 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
1145 {
1146 Log(("CPUM: MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1147 rc = VERR_CPUM_RAISE_GP_0;
1148 break;
1149 }
1150
1151 /* Provide more plausible values for some of them. */
1152 switch (idMsr)
1153 {
1154 case MSR_RAPL_POWER_UNIT:
1155 *puValue = RT_MAKE_U32_FROM_U8(3 /* power units (1/8 W)*/,
1156 16 /* 15.3 micro-Joules */,
1157 10 /* 976 microseconds increments */,
1158 0);
1159 break;
1160 case MSR_BBL_CR_CTL3:
1161 *puValue = RT_MAKE_U32_FROM_U8(1, /* bit 0 - L2 Hardware Enabled. (RO) */
1162 1, /* bit 8 - L2 Enabled (R/W). */
1163 0, /* bit 23 - L2 Not Present (RO). */
1164 0);
1165 break;
1166 case MSR_PKG_CST_CONFIG_CONTROL:
1167 *puValue = pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl;
1168 break;
1169 case MSR_CORE_THREAD_COUNT:
1170 {
1171 /** @todo restrict this to Nehalem. */
1172 PVM pVM = pVCpu->CTX_SUFF(pVM); /* Note! Not sweating the 4-bit core count limit on westmere. */
1173 *puValue = (pVM->cCpus & 0xffff) | ((pVM->cCpus & 0xffff) << 16);
1174 break;
1175 }
1176
1177 case MSR_FLEX_RATIO:
1178 {
1179 /** @todo Check for P4, it's different there. Try find accurate specs. */
1180 *puValue = (uint32_t)u8Multiplier << 8;
1181 break;
1182 }
1183 }
1184 break;
1185
1186#if 0 /* Only on pentium CPUs! */
1187 /* Event counters, not supported. */
1188 case MSR_IA32_CESR:
1189 case MSR_IA32_CTR0:
1190 case MSR_IA32_CTR1:
1191 *puValue = 0;
1192 break;
1193#endif
1194
1195
1196 /*
1197 * AMD specific MSRs:
1198 */
1199 case MSR_K8_SYSCFG:
1200 case MSR_K8_INT_PENDING:
1201 case MSR_K8_NB_CFG: /* (All known values are 0 on reset.) */
1202 case MSR_K8_HWCR: /* Very interesting bits here. :) */
1203 case MSR_K8_VM_CR: /* Windows 8 */
1204 case 0xc0011029: /* quick fix for FreeBSD 9.1. */
1205 case 0xc0010042: /* quick fix for something. */
1206 case 0xc001102a: /* quick fix for w2k8 + opposition. */
1207 case 0xc0011004: /* quick fix for the opposition. */
1208 case 0xc0011005: /* quick fix for the opposition. */
1209 case MSR_K7_EVNTSEL0: /* quick fix for the opposition. */
1210 case MSR_K7_EVNTSEL1: /* quick fix for the opposition. */
1211 case MSR_K7_EVNTSEL2: /* quick fix for the opposition. */
1212 case MSR_K7_EVNTSEL3: /* quick fix for the opposition. */
1213 case MSR_K7_PERFCTR0: /* quick fix for the opposition. */
1214 case MSR_K7_PERFCTR1: /* quick fix for the opposition. */
1215 case MSR_K7_PERFCTR2: /* quick fix for the opposition. */
1216 case MSR_K7_PERFCTR3: /* quick fix for the opposition. */
1217 *puValue = 0;
1218 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_AMD)
1219 {
1220 Log(("CPUM: MSR %#x is AMD, the virtual CPU isn't an AMD one -> #GP\n", idMsr));
1221 return VERR_CPUM_RAISE_GP_0;
1222 }
1223 /* ignored */
1224 break;
1225
1226 default:
1227 /*
1228 * Hand the X2APIC range to PDM and the APIC.
1229 */
1230 if ( idMsr >= MSR_IA32_X2APIC_START
1231 && idMsr <= MSR_IA32_X2APIC_END)
1232 {
1233 rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
1234 if (RT_SUCCESS(rc))
1235 rc = VINF_SUCCESS;
1236 else
1237 {
1238 *puValue = 0;
1239 rc = VERR_CPUM_RAISE_GP_0;
1240 }
1241 }
1242 else
1243 {
1244 *puValue = 0;
1245 rc = VERR_CPUM_RAISE_GP_0;
1246 }
1247 break;
1248 }
1249
1250 return rc;
1251}
1252
1253
1254/**
1255 * Query an MSR.
1256 *
1257 * The caller is responsible for checking privilege if the call is the result
1258 * of a RDMSR instruction. We'll do the rest.
1259 *
1260 * @retval VINF_SUCCESS on success.
1261 * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
1262 * expected to take the appropriate actions. @a *puValue is set to 0.
1263 * @param pVCpu Pointer to the VMCPU.
1264 * @param idMsr The MSR.
1265 * @param puValue Where to return the value.
1266 *
1267 * @remarks This will always return the right values, even when we're in the
1268 * recompiler.
1269 */
1270VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
1271{
1272 int rc = cpumQueryGuestMsrInt(pVCpu, idMsr, puValue);
1273 LogFlow(("CPUMQueryGuestMsr: %#x -> %llx rc=%d\n", idMsr, *puValue, rc));
1274 return rc;
1275}
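/* A minimal sketch of how a caller might honour the contract above (illustration only;
 * the #GP(0) injection helper name is hypothetical): */
#if 0 /* illustration only */
static int cpumExampleReadMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
{
    int rc = CPUMQueryGuestMsr(pVCpu, idMsr, puValue);
    if (rc == VERR_CPUM_RAISE_GP_0)
        return someEmulatorRaiseGP0(pVCpu); /* hypothetical helper injecting #GP(0) into the guest */
    /* On VINF_SUCCESS *puValue holds the MSR contents; on failure it was set to 0. */
    return rc;
}
#endif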
1276
1277
1278/**
1279 * Sets the MSR.
1280 *
1281 * The caller is responsible for checking privilege if the call is the result
1282 * of a WRMSR instruction. We'll do the rest.
1283 *
1284 * @retval VINF_SUCCESS on success.
1285 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
1286 * appropriate actions.
1287 *
1288 * @param pVCpu Pointer to the VMCPU.
1289 * @param idMsr The MSR id.
1290 * @param uValue The value to set.
1291 *
1292 * @remarks Everyone changing MSR values, including the recompiler, shall do it
1293 * by calling this method. This makes sure we have current values and
1294 * that we trigger all the right actions when something changes.
1295 */
1296VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
1297{
1298 LogFlow(("CPUMSetGuestMsr: %#x <- %#llx\n", idMsr, uValue));
1299
1300 /*
1301 * If we don't indicate MSR support in the CPUID feature bits, indicate
1302 * that a #GP(0) should be raised.
1303 */
1304 if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
1305 return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
1306
1307 int rc = VINF_SUCCESS;
1308 switch (idMsr)
1309 {
1310 case MSR_IA32_MISC_ENABLE:
1311 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = uValue;
1312 break;
1313
1314 case MSR_IA32_TSC:
1315 TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
1316 break;
1317
1318 case MSR_IA32_APICBASE:
1319 rc = PDMApicSetBase(pVCpu, uValue);
1320 if (rc != VINF_SUCCESS)
1321 rc = VERR_CPUM_RAISE_GP_0;
1322 break;
1323
1324 case MSR_IA32_CR_PAT:
1325 pVCpu->cpum.s.Guest.msrPAT = uValue;
1326 break;
1327
1328 case MSR_IA32_SYSENTER_CS:
1329 pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16 bits selector */
1330 break;
1331
1332 case MSR_IA32_SYSENTER_EIP:
1333 pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
1334 break;
1335
1336 case MSR_IA32_SYSENTER_ESP:
1337 pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
1338 break;
1339
1340 case MSR_IA32_MTRR_CAP:
1341 return VERR_CPUM_RAISE_GP_0;
1342
1343 case MSR_IA32_MTRR_DEF_TYPE:
1344 if ( (uValue & UINT64_C(0xfffffffffffff300))
1345 || ( (uValue & 0xff) != 0
1346 && (uValue & 0xff) != 1
1347 && (uValue & 0xff) != 4
1348 && (uValue & 0xff) != 5
1349 && (uValue & 0xff) != 6) )
1350 {
1351 Log(("CPUM: MSR_IA32_MTRR_DEF_TYPE: #GP(0) - writing reserved value (%#llx)\n", uValue));
1352 return VERR_CPUM_RAISE_GP_0;
1353 }
1354 pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue;
1355 break;
1356
1357 case IA32_MTRR_PHYSBASE0: case IA32_MTRR_PHYSMASK0:
1358 case IA32_MTRR_PHYSBASE1: case IA32_MTRR_PHYSMASK1:
1359 case IA32_MTRR_PHYSBASE2: case IA32_MTRR_PHYSMASK2:
1360 case IA32_MTRR_PHYSBASE3: case IA32_MTRR_PHYSMASK3:
1361 case IA32_MTRR_PHYSBASE4: case IA32_MTRR_PHYSMASK4:
1362 case IA32_MTRR_PHYSBASE5: case IA32_MTRR_PHYSMASK5:
1363 case IA32_MTRR_PHYSBASE6: case IA32_MTRR_PHYSMASK6:
1364 case IA32_MTRR_PHYSBASE7: case IA32_MTRR_PHYSMASK7:
1365 /** @todo implement variable MTRRs. */
1366 break;
1367#if 0 /** @todo newer CPUs have more, figure since when and do selective GP(). */
1368 case IA32_MTRR_PHYSBASE8: case IA32_MTRR_PHYSMASK8:
1369 case IA32_MTRR_PHYSBASE9: case IA32_MTRR_PHYSMASK9:
1370 break;
1371#endif
1372
1373 case IA32_MTRR_FIX64K_00000:
1374 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000 = uValue;
1375 break;
1376 case IA32_MTRR_FIX16K_80000:
1377 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000 = uValue;
1378 break;
1379 case IA32_MTRR_FIX16K_A0000:
1380 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000 = uValue;
1381 break;
1382 case IA32_MTRR_FIX4K_C0000:
1383 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000 = uValue;
1384 break;
1385 case IA32_MTRR_FIX4K_C8000:
1386 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000 = uValue;
1387 break;
1388 case IA32_MTRR_FIX4K_D0000:
1389 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000 = uValue;
1390 break;
1391 case IA32_MTRR_FIX4K_D8000:
1392 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000 = uValue;
1393 break;
1394 case IA32_MTRR_FIX4K_E0000:
1395 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000 = uValue;
1396 break;
1397 case IA32_MTRR_FIX4K_E8000:
1398 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000 = uValue;
1399 break;
1400 case IA32_MTRR_FIX4K_F0000:
1401 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000 = uValue;
1402 break;
1403 case IA32_MTRR_FIX4K_F8000:
1404 pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000 = uValue;
1405 break;
1406
1407 /*
1408 * AMD64 MSRs.
1409 */
1410 case MSR_K6_EFER:
1411 {
1412 PVM pVM = pVCpu->CTX_SUFF(pVM);
1413 uint64_t const uOldEFER = pVCpu->cpum.s.Guest.msrEFER;
1414 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
1415 ? pVM->cpum.s.aGuestCpuIdExt[1].edx
1416 : 0;
1417 uint64_t fMask = 0;
1418
1419 /* Build a mask of the bits the guest is allowed to change. (E.g. LMA is read-only.) */
1420 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)
1421 fMask |= MSR_K6_EFER_NXE;
1422 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
1423 fMask |= MSR_K6_EFER_LME;
1424 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
1425 fMask |= MSR_K6_EFER_SCE;
1426 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
1427 fMask |= MSR_K6_EFER_FFXSR;
1428
1429 /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
1430 paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
1431 if ( (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
1432 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
1433 {
1434 Log(("CPUM: Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
1435 return VERR_CPUM_RAISE_GP_0;
1436 }
1437
1438 /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
1439 AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
1440 ("Unexpected value %RX64\n", uValue));
1441 pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);
1442
1443 /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
1444 if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
1445 if ( (uOldEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
1446 != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
1447 {
1448 /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
1449 HMFlushTLB(pVCpu);
1450
1451 /* Notify PGM about NXE changes. */
1452 if ( (uOldEFER & MSR_K6_EFER_NXE)
1453 != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
1454 PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE));
1455 }
1456 break;
1457 }
1458
1459 case MSR_K8_SF_MASK:
1460 pVCpu->cpum.s.Guest.msrSFMASK = uValue;
1461 break;
1462
1463 case MSR_K6_STAR:
1464 pVCpu->cpum.s.Guest.msrSTAR = uValue;
1465 break;
1466
1467 case MSR_K8_LSTAR:
1468 pVCpu->cpum.s.Guest.msrLSTAR = uValue;
1469 break;
1470
1471 case MSR_K8_CSTAR:
1472 pVCpu->cpum.s.Guest.msrCSTAR = uValue;
1473 break;
1474
1475 case MSR_K8_FS_BASE:
1476 pVCpu->cpum.s.Guest.fs.u64Base = uValue;
1477 break;
1478
1479 case MSR_K8_GS_BASE:
1480 pVCpu->cpum.s.Guest.gs.u64Base = uValue;
1481 break;
1482
1483 case MSR_K8_KERNEL_GS_BASE:
1484 pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
1485 break;
1486
1487 case MSR_K8_TSC_AUX:
1488 pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
1489 break;
1490
1491 case MSR_IA32_DEBUGCTL:
1492 /** @todo virtualize DEBUGCTL and relatives */
1493 break;
1494
1495 /*
1496 * Intel-specific MSRs:
1497 */
1498 /*case MSR_IA32_PLATFORM_ID: - read-only */
1499 case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */
1500 case MSR_IA32_BIOS_UPDT_TRIG: /* fam/mod >= 6_01 */
1501 /*case MSR_IA32_MCP_CAP: - read-only */
1502 /*case MSR_IA32_MCG_STATUS: - read-only */
1503 /*case MSR_IA32_MCG_CTRL: - indicated as not present in CAP */
1504 /*case MSR_IA32_MC0_CTL: - read-only? */
1505 /*case MSR_IA32_MC0_STATUS: - read-only? */
1506 case MSR_PKG_CST_CONFIG_CONTROL:
1507 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
1508 {
1509 Log(("CPUM: MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
1510 return VERR_CPUM_RAISE_GP_0;
1511 }
1512
1513 switch (idMsr)
1514 {
1515 case MSR_PKG_CST_CONFIG_CONTROL:
1516 {
1517 if (pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl & RT_BIT_64(15))
1518 {
1519 Log(("MSR_PKG_CST_CONFIG_CONTROL: Write protected -> #GP\n"));
1520 return VERR_CPUM_RAISE_GP_0;
1521 }
1522 static uint64_t s_fMask = UINT64_C(0x01f08407); /** @todo Only Nehalem has 24; Only Sandy has 27 and 28. */
1523 static uint64_t s_fGpInvalid = UINT64_C(0xffffffff00ff0000); /** @todo figure out exactly what's off limits. */
1524 if ((uValue & s_fGpInvalid) || (uValue & 7) >= 5)
1525 {
1526 Log(("MSR_PKG_CST_CONFIG_CONTROL: Invalid value %#llx -> #GP\n", uValue));
1527 return VERR_CPUM_RAISE_GP_0;
1528 }
1529 pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = uValue & s_fMask;
1530 break;
1531 }
1532
1533 }
1534 /* ignored */
1535 break;
1536
1537 /*
1538 * AMD specific MSRs:
1539 */
1540 case MSR_K8_SYSCFG: /** @todo can be written, but we ignore that for now. */
1541 case MSR_K8_INT_PENDING: /** @todo can be written, but we ignore that for now. */
1542 case MSR_K8_NB_CFG: /** @todo can be written; the apicid swapping might be used and would need saving, but probably unnecessary. */
1543 case 0xc0011029: /* quick fix for FreeBSD 9.1. */
1544 case 0xc0010042: /* quick fix for something. */
1545 case 0xc001102a: /* quick fix for w2k8 + opposition. */
1546 case 0xc0011004: /* quick fix for the opposition. */
1547 case 0xc0011005: /* quick fix for the opposition. */
1548 case MSR_K7_EVNTSEL0: /* quick fix for the opposition. */
1549 case MSR_K7_EVNTSEL1: /* quick fix for the opposition. */
1550 case MSR_K7_EVNTSEL2: /* quick fix for the opposition. */
1551 case MSR_K7_EVNTSEL3: /* quick fix for the opposition. */
1552 case MSR_K7_PERFCTR0: /* quick fix for the opposition. */
1553 case MSR_K7_PERFCTR1: /* quick fix for the opposition. */
1554 case MSR_K7_PERFCTR2: /* quick fix for the opposition. */
1555 case MSR_K7_PERFCTR3: /* quick fix for the opposition. */
1556 if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_AMD)
1557 {
1558 Log(("CPUM: MSR %#x is AMD, the virtual CPU isn't an AMD one -> #GP\n", idMsr));
1559 return VERR_CPUM_RAISE_GP_0;
1560 }
1561 /* ignored */
1562 break;
1563
1564
1565 default:
1566 /*
1567 * Hand the X2APIC range to PDM and the APIC.
1568 */
1569 if ( idMsr >= MSR_IA32_X2APIC_START
1570 && idMsr <= MSR_IA32_X2APIC_END)
1571 {
1572 rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
1573 if (rc != VINF_SUCCESS)
1574 rc = VERR_CPUM_RAISE_GP_0;
1575 }
1576 else
1577 {
1578 /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
1579 /** @todo rc = VERR_CPUM_RAISE_GP_0 */
1580 Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
1581 }
1582 break;
1583 }
1584 return rc;
1585}
1586
1587
1588VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
1589{
1590 if (pcbLimit)
1591 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
1592 return pVCpu->cpum.s.Guest.idtr.pIdt;
1593}
1594
1595
1596VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
1597{
1598 if (pHidden)
1599 *pHidden = pVCpu->cpum.s.Guest.tr;
1600 return pVCpu->cpum.s.Guest.tr.Sel;
1601}
1602
1603
1604VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
1605{
1606 return pVCpu->cpum.s.Guest.cs.Sel;
1607}
1608
1609
1610VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
1611{
1612 return pVCpu->cpum.s.Guest.ds.Sel;
1613}
1614
1615
1616VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
1617{
1618 return pVCpu->cpum.s.Guest.es.Sel;
1619}
1620
1621
1622VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
1623{
1624 return pVCpu->cpum.s.Guest.fs.Sel;
1625}
1626
1627
1628VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
1629{
1630 return pVCpu->cpum.s.Guest.gs.Sel;
1631}
1632
1633
1634VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
1635{
1636 return pVCpu->cpum.s.Guest.ss.Sel;
1637}
1638
1639
1640VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
1641{
1642 return pVCpu->cpum.s.Guest.ldtr.Sel;
1643}
1644
1645
1646VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
1647{
1648 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
1649 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
1650 return pVCpu->cpum.s.Guest.ldtr.Sel;
1651}
1652
1653
1654VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
1655{
1656 return pVCpu->cpum.s.Guest.cr0;
1657}
1658
1659
1660VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
1661{
1662 return pVCpu->cpum.s.Guest.cr2;
1663}
1664
1665
1666VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
1667{
1668 return pVCpu->cpum.s.Guest.cr3;
1669}
1670
1671
1672VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
1673{
1674 return pVCpu->cpum.s.Guest.cr4;
1675}
1676
1677
1678VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
1679{
1680 uint64_t u64;
1681 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1682 if (RT_FAILURE(rc))
1683 u64 = 0;
1684 return u64;
1685}
1686
1687
1688VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
1689{
1690 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1691}
1692
1693
1694VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
1695{
1696 return pVCpu->cpum.s.Guest.eip;
1697}
1698
1699
1700VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1701{
1702 return pVCpu->cpum.s.Guest.rip;
1703}
1704
1705
1706VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1707{
1708 return pVCpu->cpum.s.Guest.eax;
1709}
1710
1711
1712VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1713{
1714 return pVCpu->cpum.s.Guest.ebx;
1715}
1716
1717
1718VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1719{
1720 return pVCpu->cpum.s.Guest.ecx;
1721}
1722
1723
1724VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1725{
1726 return pVCpu->cpum.s.Guest.edx;
1727}
1728
1729
1730VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1731{
1732 return pVCpu->cpum.s.Guest.esi;
1733}
1734
1735
1736VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1737{
1738 return pVCpu->cpum.s.Guest.edi;
1739}
1740
1741
1742VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1743{
1744 return pVCpu->cpum.s.Guest.esp;
1745}
1746
1747
1748VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1749{
1750 return pVCpu->cpum.s.Guest.ebp;
1751}
1752
1753
1754VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1755{
1756 return pVCpu->cpum.s.Guest.eflags.u32;
1757}
1758
1759
1760VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1761{
1762 switch (iReg)
1763 {
1764 case DISCREG_CR0:
1765 *pValue = pVCpu->cpum.s.Guest.cr0;
1766 break;
1767
1768 case DISCREG_CR2:
1769 *pValue = pVCpu->cpum.s.Guest.cr2;
1770 break;
1771
1772 case DISCREG_CR3:
1773 *pValue = pVCpu->cpum.s.Guest.cr3;
1774 break;
1775
1776 case DISCREG_CR4:
1777 *pValue = pVCpu->cpum.s.Guest.cr4;
1778 break;
1779
1780 case DISCREG_CR8:
1781 {
1782 uint8_t u8Tpr;
1783 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1784 if (RT_FAILURE(rc))
1785 {
1786 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1787 *pValue = 0;
1788 return rc;
1789 }
1790 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes into CR8; bits 3-0 are discarded. */
1791 break;
1792 }
1793
1794 default:
1795 return VERR_INVALID_PARAMETER;
1796 }
1797 return VINF_SUCCESS;
1798}
1799
1800
1801VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1802{
1803 return pVCpu->cpum.s.Guest.dr[0];
1804}
1805
1806
1807VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1808{
1809 return pVCpu->cpum.s.Guest.dr[1];
1810}
1811
1812
1813VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1814{
1815 return pVCpu->cpum.s.Guest.dr[2];
1816}
1817
1818
1819VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1820{
1821 return pVCpu->cpum.s.Guest.dr[3];
1822}
1823
1824
1825VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1826{
1827 return pVCpu->cpum.s.Guest.dr[6];
1828}
1829
1830
1831VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1832{
1833 return pVCpu->cpum.s.Guest.dr[7];
1834}
1835
1836
1837VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1838{
1839 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1840 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1841 if (iReg == 4 || iReg == 5)
1842 iReg += 2;
1843 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1844 return VINF_SUCCESS;
1845}
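/* Example of the DR4/DR5 aliasing above: CPUMGetGuestDRx(pVCpu, 4, &uValue) returns the
 * DR6 contents and CPUMGetGuestDRx(pVCpu, 5, &uValue) the DR7 contents, mirroring the
 * CPU behaviour when CR4.DE is clear. */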
1846
1847
1848VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1849{
1850 return pVCpu->cpum.s.Guest.msrEFER;
1851}
1852
1853
1854/**
1855 * Gets a CPUID leaf.
1856 *
1857 * @param pVCpu Pointer to the VMCPU.
1858 * @param iLeaf The CPUID leaf to get.
1859 * @param pEax Where to store the EAX value.
1860 * @param pEbx Where to store the EBX value.
1861 * @param pEcx Where to store the ECX value.
1862 * @param pEdx Where to store the EDX value.
1863 */
1864VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1865{
1866 PVM pVM = pVCpu->CTX_SUFF(pVM);
1867
1868 PCCPUMCPUID pCpuId;
1869 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1870 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1871 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1872 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1873 else if ( iLeaf - UINT32_C(0x40000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdHyper)
1874 && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP))
1875 pCpuId = &pVM->cpum.s.aGuestCpuIdHyper[iLeaf - UINT32_C(0x40000000)]; /* Only report if HVP bit set. */
1876 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1877 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1878 else
1879 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1880
1881 uint32_t cCurrentCacheIndex = *pEcx;
1882
1883 *pEax = pCpuId->eax;
1884 *pEbx = pCpuId->ebx;
1885 *pEcx = pCpuId->ecx;
1886 *pEdx = pCpuId->edx;
1887
1888 if ( iLeaf == 1)
1889 {
1890 /* Bits 31-24: Initial APIC ID */
1891 Assert(pVCpu->idCpu <= 255);
1892 *pEbx |= (pVCpu->idCpu << 24);
1893 }
1894
1895 if ( iLeaf == 4
1896 && cCurrentCacheIndex < 3
1897 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
1898 {
1899 uint32_t type, level, sharing, linesize,
1900 partitions, associativity, sets, cores;
1901
1902 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1903 partitions = 1;
1904 /* These are only here to silence the compiler, as they will always
1905 be overwritten; the compiler should be able to figure that out. */
1906 sets = associativity = sharing = level = 1;
1907 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1908 switch (cCurrentCacheIndex)
1909 {
1910 case 0:
1911 type = 1;
1912 level = 1;
1913 sharing = 1;
1914 linesize = 64;
1915 associativity = 8;
1916 sets = 64;
1917 break;
1918 case 1:
1919 level = 1;
1920 type = 2;
1921 sharing = 1;
1922 linesize = 64;
1923 associativity = 8;
1924 sets = 64;
1925 break;
1926 default: /* shut up gcc.*/
1927 AssertFailed();
1928 case 2:
1929 level = 2;
1930 type = 3;
1931 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1932 linesize = 64;
1933 associativity = 24;
1934 sets = 4096;
1935 break;
1936 }
1937
1938 NOREF(type);
1939 *pEax |= ((cores - 1) << 26) |
1940 ((sharing - 1) << 14) |
1941 (level << 5) |
1942 1;
1943 *pEbx = (linesize - 1) |
1944 ((partitions - 1) << 12) |
1945 ((associativity - 1) << 22); /* -1 encoding */
1946 *pEcx = sets - 1;
1947 }
1948
1949 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1950}
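/* Worked example for the leaf 4 encoding above: the synthetic L2 entry advertises
 * 24 ways, 1 partition, 64 byte lines and 4096 sets, i.e. a cache size of
 * 24 * 1 * 64 * 4096 bytes = 6 MiB, using the usual "value minus one" CPUID encoding. */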
1951
1952/**
1953 * Gets a number of standard CPUID leafs.
1954 *
1955 * @returns Number of leafs.
1956 * @param pVM Pointer to the VM.
1957 * @remark Intended for PATM.
1958 */
1959VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1960{
1961 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1962}
1963
1964
1965/**
1966 * Gets a number of extended CPUID leafs.
1967 *
1968 * @returns Number of leafs.
1969 * @param pVM Pointer to the VM.
1970 * @remark Intended for PATM.
1971 */
1972VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1973{
1974 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1975}
1976
1977
1978/**
1979 * Gets a number of centaur CPUID leafs.
1980 *
1981 * @returns Number of leafs.
1982 * @param pVM Pointer to the VM.
1983 * @remark Intended for PATM.
1984 */
1985VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1986{
1987 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1988}
1989
1990
1991/**
1992 * Sets a CPUID feature bit.
1993 *
1994 * @param pVM Pointer to the VM.
1995 * @param enmFeature The feature to set.
1996 */
1997VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1998{
1999 switch (enmFeature)
2000 {
2001 /*
2002 * Set the APIC bit in both feature masks.
2003 */
2004 case CPUMCPUIDFEATURE_APIC:
2005 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2006 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
2007 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2008 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2009 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
2010 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n"));
2011 break;
2012
2013 /*
2014 * Set the x2APIC bit in the standard feature mask.
2015 */
2016 case CPUMCPUIDFEATURE_X2APIC:
2017 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2018 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
2019 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
2020 break;
2021
2022 /*
2023 * Set the sysenter/sysexit bit in the standard feature mask.
2024 * Assumes the caller knows what it's doing! (host must support these)
2025 */
2026 case CPUMCPUIDFEATURE_SEP:
2027 {
2028 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
2029 {
2030 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
2031 return;
2032 }
2033
2034 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2035 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
2036 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
2037 break;
2038 }
2039
2040 /*
2041 * Set the syscall/sysret bit in the extended feature mask.
2042 * Assumes the caller knows what it's doing! (host must support these)
2043 */
2044 case CPUMCPUIDFEATURE_SYSCALL:
2045 {
2046 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2047 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
2048 {
2049#if HC_ARCH_BITS == 32
2050 /* It seems X86_CPUID_EXT_FEATURE_EDX_SYSCALL is not reported in 32-bit mode,
2051 * even when the CPU is capable of it in 64-bit mode.
2052 */
2053 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2054 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
2055 || !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
2056#endif
2057 {
2058 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
2059 return;
2060 }
2061 }
2062 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
2063 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
2064 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
2065 break;
2066 }
2067
2068 /*
2069 * Set the PAE bit in both feature masks.
2070 * Assumes the caller knows what it's doing! (host must support these)
2071 */
2072 case CPUMCPUIDFEATURE_PAE:
2073 {
2074 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
2075 {
2076 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
2077 return;
2078 }
2079
2080 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2081 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
2082 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2083 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2084 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
2085 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
2086 break;
2087 }
2088
2089 /*
2090 * Set the LONG MODE bit in the extended feature mask.
2091 * Assumes the caller knows what it's doing! (host must support these)
2092 */
2093 case CPUMCPUIDFEATURE_LONG_MODE:
2094 {
2095 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2096 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2097 {
2098 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
2099 return;
2100 }
2101
2102 /* Valid for both Intel and AMD. */
2103 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
2104 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
2105 break;
2106 }
2107
2108 /*
2109 * Set the NX/XD bit in the extended feature mask.
2110 * Assumes the caller knows what it's doing! (host must support these)
2111 */
2112 case CPUMCPUIDFEATURE_NX:
2113 {
2114 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2115 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
2116 {
2117 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
2118 return;
2119 }
2120
2121 /* Valid for both Intel and AMD. */
2122 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX;
2123 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
2124 break;
2125 }
2126
2127 /*
2128 * Set the LAHF/SAHF support in 64-bit mode.
2129 * Assumes the caller knows what it's doing! (host must support this)
2130 */
2131 case CPUMCPUIDFEATURE_LAHF:
2132 {
2133 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2134 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
2135 {
2136 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
2137 return;
2138 }
2139
2140 /* Valid for both Intel and AMD. */
2141 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
2142 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
2143 break;
2144 }
2145
2146 case CPUMCPUIDFEATURE_PAT:
2147 {
2148 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2149 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
2150 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2151 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2152 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
2153 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
2154 break;
2155 }
2156
2157 /*
2158 * Set the RDTSCP support bit.
2159 * Assumes the caller knows what it's doing! (host must support this)
2160 */
2161 case CPUMCPUIDFEATURE_RDTSCP:
2162 {
2163 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
2164 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
2165 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
2166 {
2167 if (!pVM->cpum.s.u8PortableCpuIdLevel)
2168 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
2169 return;
2170 }
2171
2172 /* Valid for both Intel and AMD. */
2173 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
2174 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
2175 break;
2176 }
2177
2178 /*
2179 * Set the Hypervisor Present bit in the standard feature mask.
2180 */
2181 case CPUMCPUIDFEATURE_HVP:
2182 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2183 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP;
2184 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
2185 break;
2186
2187 default:
2188 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2189 break;
2190 }
2191 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2192 {
2193 PVMCPU pVCpu = &pVM->aCpus[i];
2194 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
2195 }
2196}
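/*
 * A minimal usage sketch, assuming a hypothetical ring-3 caller that decides
 * during VM construction whether the guest should see NX; the function above
 * re-checks host support itself and simply logs and returns if it is missing:
 *
 * @code
 *     if (fGuestShouldSeeNx)      // hypothetical configuration flag
 *         CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
 *     else
 *         CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
 * @endcode
 */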
2197
2198
2199/**
2200 * Queries a CPUID feature bit.
2201 *
2202 * @returns boolean for feature presence
2203 * @param pVM Pointer to the VM.
2204 * @param enmFeature The feature to query.
2205 */
2206VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
2207{
2208 switch (enmFeature)
2209 {
2210 case CPUMCPUIDFEATURE_PAE:
2211 {
2212 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2213 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
2214 break;
2215 }
2216
2217 case CPUMCPUIDFEATURE_NX:
2218 {
2219 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2220 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX);
 break;
2221 }
2222
2223 case CPUMCPUIDFEATURE_SYSCALL:
2224 {
2225 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2226 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL);
 break;
2227 }
2228
2229 case CPUMCPUIDFEATURE_RDTSCP:
2230 {
2231 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2232 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
2233 break;
2234 }
2235
2236 case CPUMCPUIDFEATURE_LONG_MODE:
2237 {
2238 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2239 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
2240 break;
2241 }
2242
2243 default:
2244 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2245 break;
2246 }
2247 return false;
2248}
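/*
 * A minimal usage sketch, assuming a hypothetical caller that needs to know
 * whether the guest-visible CPUID advertises long mode before attempting to
 * run 64-bit guest code:
 *
 * @code
 *     if (!CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE))
 *         return VERR_NOT_SUPPORTED;  // hypothetical error handling
 * @endcode
 */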
2249
2250
2251/**
2252 * Clears a CPUID feature bit.
2253 *
2254 * @param pVM Pointer to the VM.
2255 * @param enmFeature The feature to clear.
2256 */
2257VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
2258{
2259 switch (enmFeature)
2260 {
2261 /*
2262 * Set the APIC bit in both feature masks.
2263 */
2264 case CPUMCPUIDFEATURE_APIC:
2265 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2266 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
2267 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2268 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2269 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
2270 Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
2271 break;
2272
2273 /*
2274 * Clear the x2APIC bit in the standard feature mask.
2275 */
2276 case CPUMCPUIDFEATURE_X2APIC:
2277 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2278 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
2279 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
2280 break;
2281
2282 case CPUMCPUIDFEATURE_PAE:
2283 {
2284 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2285 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
2286 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2287 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2288 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
2289 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
2290 break;
2291 }
2292
2293 case CPUMCPUIDFEATURE_PAT:
2294 {
2295 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2296 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
2297 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2298 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2299 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
2300 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
2301 break;
2302 }
2303
2304 case CPUMCPUIDFEATURE_LONG_MODE:
2305 {
2306 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2307 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
2308 break;
2309 }
2310
2311 case CPUMCPUIDFEATURE_LAHF:
2312 {
2313 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2314 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
2315 break;
2316 }
2317
2318 case CPUMCPUIDFEATURE_RDTSCP:
2319 {
2320 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2321 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
2322 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
2323 break;
2324 }
2325
2326 case CPUMCPUIDFEATURE_HVP:
2327 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2328 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP;
2329 break;
2330
2331 default:
2332 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2333 break;
2334 }
2335 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2336 {
2337 PVMCPU pVCpu = &pVM->aCpus[i];
2338 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
2339 }
2340}
2341
2342
2343/**
2344 * Gets the host CPU vendor.
2345 *
2346 * @returns CPU vendor.
2347 * @param pVM Pointer to the VM.
2348 */
2349VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
2350{
2351 return pVM->cpum.s.enmHostCpuVendor;
2352}
2353
2354
2355/**
2356 * Gets the CPU vendor.
2357 *
2358 * @returns CPU vendor.
2359 * @param pVM Pointer to the VM.
2360 */
2361VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
2362{
2363 return pVM->cpum.s.enmGuestCpuVendor;
2364}
2365
2366
2367VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
2368{
2369 pVCpu->cpum.s.Guest.dr[0] = uDr0;
2370 return CPUMRecalcHyperDRx(pVCpu, 0, false);
2371}
2372
2373
2374VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
2375{
2376 pVCpu->cpum.s.Guest.dr[1] = uDr1;
2377 return CPUMRecalcHyperDRx(pVCpu, 1, false);
2378}
2379
2380
2381VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
2382{
2383 pVCpu->cpum.s.Guest.dr[2] = uDr2;
2384 return CPUMRecalcHyperDRx(pVCpu, 2, false);
2385}
2386
2387
2388VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
2389{
2390 pVCpu->cpum.s.Guest.dr[3] = uDr3;
2391 return CPUMRecalcHyperDRx(pVCpu, 3, false);
2392}
2393
2394
2395VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
2396{
2397 pVCpu->cpum.s.Guest.dr[6] = uDr6;
2398 return VINF_SUCCESS; /* No need to recalc. */
2399}
2400
2401
2402VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
2403{
2404 pVCpu->cpum.s.Guest.dr[7] = uDr7;
2405 return CPUMRecalcHyperDRx(pVCpu, 7, false);
2406}
2407
2408
2409VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
2410{
2411 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
2412 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
2413 if (iReg == 4 || iReg == 5)
2414 iReg += 2;
2415 pVCpu->cpum.s.Guest.dr[iReg] = Value;
2416 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
2417}
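/*
 * A minimal sketch of the DR4/DR5 aliasing handled above (hypothetical values,
 * CR4.DE is not considered here): a write to "DR5" lands in the DR7 slot.
 *
 * @code
 *     CPUMSetGuestDRx(pVCpu, 5, X86_DR7_RA1_MASK | X86_DR7_L0);
 *     Assert(CPUMGetGuestDR7(pVCpu) == (X86_DR7_RA1_MASK | X86_DR7_L0));
 * @endcode
 */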
2418
2419
2420/**
2421 * Recalculates the hypervisor DRx register values based on current guest
2422 * registers and DBGF breakpoints, updating changed registers depending on the
2423 * context.
2424 *
2425 * This is called whenever a guest DRx register is modified (any context) and
2426 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
2427 *
2428 * In raw-mode context this function will reload any (hyper) DRx registers which
2429 * come out with a different value. It may also have to save the host debug
2430 * registers if that hasn't been done already. In this context though, we'll
2431 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
2432 * are only important when breakpoints are actually enabled.
2433 *
2434 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
2435 * reloaded by the HM code if it changes. Furthermore, we will only use the
2436 * combined register set when the VBox debugger is actually using hardware BPs,
2437 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
2438 * concern us here).
2439 *
2440 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
2441 * all the time.
2442 *
2443 * @returns VINF_SUCCESS.
2444 * @param pVCpu Pointer to the VMCPU.
2445 * @param iGstReg The guest debug register number that was modified.
2446 * UINT8_MAX if not guest register.
2447 * @param fForceHyper Used in HM to force hyper registers because of single
2448 * stepping.
2449 */
2450VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
2451{
2452 PVM pVM = pVCpu->CTX_SUFF(pVM);
2453
2454 /*
2455 * Compare the DR7s first.
2456 *
2457 * We only care about the enabled flags. GD is virtualized when we
2458 * dispatch the #DB, we never enable it. The DBGF DR7 value will
2459 * always have the LE and GE bits set, so no need to check and disable
2460 * stuff if they're cleared like we have to for the guest DR7.
2461 */
2462 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
2463 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
2464 uGstDr7 = 0;
2465 else if (!(uGstDr7 & X86_DR7_LE))
2466 uGstDr7 &= ~X86_DR7_LE_ALL;
2467 else if (!(uGstDr7 & X86_DR7_GE))
2468 uGstDr7 &= ~X86_DR7_GE_ALL;
2469
2470 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
2471
2472#ifdef IN_RING0
2473 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
2474 fForceHyper = true;
2475#endif
2476 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
2477 {
2478 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
2479#ifdef IN_RC
2480 bool const fHmEnabled = false;
2481#elif defined(IN_RING3)
2482 bool const fHmEnabled = HMIsEnabled(pVM);
2483#endif
2484
2485 /*
2486 * Ok, something is enabled. Recalc each of the breakpoints, taking
2487 * the VM debugger ones over the guest ones. In raw-mode context we will
2488 * not allow breakpoints with values inside the hypervisor area.
2489 */
2490 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
2491
2492 /* bp 0 */
2493 RTGCUINTREG uNewDr0;
2494 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
2495 {
2496 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2497 uNewDr0 = DBGFBpGetDR0(pVM);
2498 }
2499 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
2500 {
2501 uNewDr0 = CPUMGetGuestDR0(pVCpu);
2502#ifndef IN_RING0
2503 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
2504 uNewDr0 = 0;
2505 else
2506#endif
2507 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2508 }
2509 else
2510 uNewDr0 = 0;
2511
2512 /* bp 1 */
2513 RTGCUINTREG uNewDr1;
2514 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
2515 {
2516 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2517 uNewDr1 = DBGFBpGetDR1(pVM);
2518 }
2519 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
2520 {
2521 uNewDr1 = CPUMGetGuestDR1(pVCpu);
2522#ifndef IN_RING0
2523 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
2524 uNewDr1 = 0;
2525 else
2526#endif
2527 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2528 }
2529 else
2530 uNewDr1 = 0;
2531
2532 /* bp 2 */
2533 RTGCUINTREG uNewDr2;
2534 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
2535 {
2536 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2537 uNewDr2 = DBGFBpGetDR2(pVM);
2538 }
2539 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
2540 {
2541 uNewDr2 = CPUMGetGuestDR2(pVCpu);
2542#ifndef IN_RING0
2543 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
2544 uNewDr2 = 0;
2545 else
2546#endif
2547 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2548 }
2549 else
2550 uNewDr2 = 0;
2551
2552 /* bp 3 */
2553 RTGCUINTREG uNewDr3;
2554 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2555 {
2556 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2557 uNewDr3 = DBGFBpGetDR3(pVM);
2558 }
2559 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2560 {
2561 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2562#ifndef IN_RING0
2563 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
2564 uNewDr3 = 0;
2565 else
2566#endif
2567 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2568 }
2569 else
2570 uNewDr3 = 0;
2571
2572 /*
2573 * Apply the updates.
2574 */
2575#ifdef IN_RC
2576 /* Make sure to save host registers first. */
2577 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
2578 {
2579 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
2580 {
2581 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
2582 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
2583 }
2584 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
2585 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
2586 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
2587 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
2588 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
2589
2590 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
2591 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
2592 ASMSetDR0(uNewDr0);
2593 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
2594 ASMSetDR1(uNewDr1);
2595 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
2596 ASMSetDR2(uNewDr2);
2597 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
2598 ASMSetDR3(uNewDr3);
2599 ASMSetDR6(X86_DR6_INIT_VAL);
2600 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
2601 ASMSetDR7(uNewDr7);
2602 }
2603 else
2604#endif
2605 {
2606 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
2607 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2608 CPUMSetHyperDR3(pVCpu, uNewDr3);
2609 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2610 CPUMSetHyperDR2(pVCpu, uNewDr2);
2611 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2612 CPUMSetHyperDR1(pVCpu, uNewDr1);
2613 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2614 CPUMSetHyperDR0(pVCpu, uNewDr0);
2615 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2616 CPUMSetHyperDR7(pVCpu, uNewDr7);
2617 }
2618 }
2619#ifdef IN_RING0
2620 else if (CPUMIsGuestDebugStateActive(pVCpu))
2621 {
2622 /*
2623 * Reload the register that was modified. Normally this won't happen
2624 * as we won't intercept DRx writes when not having the hyper debug
2625 * state loaded, but in case we do for some reason we'll simply deal
2626 * with it.
2627 */
2628 switch (iGstReg)
2629 {
2630 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
2631 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
2632 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
2633 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
2634 default:
2635 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
2636 }
2637 }
2638#endif
2639 else
2640 {
2641 /*
2642 * No active debug state any more. In raw-mode this means we have to
2643 * make sure DR7 has everything disabled now, if we armed it already.
2644 * In ring-0 we might end up here when just single stepping.
2645 */
2646#if defined(IN_RC) || defined(IN_RING0)
2647 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
2648 {
2649# ifdef IN_RC
2650 ASMSetDR7(X86_DR7_INIT_VAL);
2651# endif
2652 if (pVCpu->cpum.s.Hyper.dr[0])
2653 ASMSetDR0(0);
2654 if (pVCpu->cpum.s.Hyper.dr[1])
2655 ASMSetDR1(0);
2656 if (pVCpu->cpum.s.Hyper.dr[2])
2657 ASMSetDR2(0);
2658 if (pVCpu->cpum.s.Hyper.dr[3])
2659 ASMSetDR3(0);
2660 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
2661 }
2662#endif
2663 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2664
2665 /* Clear all the registers. */
2666 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
2667 pVCpu->cpum.s.Hyper.dr[3] = 0;
2668 pVCpu->cpum.s.Hyper.dr[2] = 0;
2669 pVCpu->cpum.s.Hyper.dr[1] = 0;
2670 pVCpu->cpum.s.Hyper.dr[0] = 0;
2671
2672 }
2673 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2674 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2675 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2676 pVCpu->cpum.s.Hyper.dr[7]));
2677
2678 return VINF_SUCCESS;
2679}
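/*
 * A minimal sketch of the DR7 selection made above (simplified; the ring-0
 * force-hyper case is ignored). uDbgfDr7 and uGstDr7 are the sanitised values
 * computed at the top of the function:
 *
 * @code
 *     RTGCUINTREG const uActiveDr7 = HMIsEnabled(pVM) && !fForceHyper
 *                                  ? uDbgfDr7               // HM: only DBGF breakpoints matter
 *                                  : uDbgfDr7 | uGstDr7;    // raw-mode: guest + DBGF combined
 *     bool const fAnyBpArmed = RT_BOOL(uActiveDr7 & X86_DR7_ENABLED_MASK);
 * @endcode
 */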
2680
2681
2682/**
2683 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2684 *
2685 * @returns true if NXE is enabled, otherwise false.
2686 * @param pVCpu Pointer to the VMCPU.
2687 */
2688VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2689{
2690 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2691}
2692
2693
2694/**
2695 * Tests if the guest has the Page Size Extension enabled (PSE).
2696 *
2697 * @returns true if PSE is enabled, otherwise false.
2698 * @param pVCpu Pointer to the VMCPU.
2699 */
2700VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2701{
2702 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2703 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2704}
2705
2706
2707/**
2708 * Tests if the guest has paging enabled (PG).
2709 *
2710 * @returns true if paging is enabled, otherwise false.
2711 * @param pVCpu Pointer to the VMCPU.
2712 */
2713VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2714{
2715 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2716}
2717
2718
2719/**
2720 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
2721 *
2722 * @returns true if CR0.WP is set, otherwise false.
2723 * @param pVCpu Pointer to the VMCPU.
2724 */
2725VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2726{
2727 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2728}
2729
2730
2731/**
2732 * Tests if the guest is running in real mode or not.
2733 *
2734 * @returns true if in real mode, otherwise false.
2735 * @param pVCpu Pointer to the VMCPU.
2736 */
2737VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2738{
2739 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2740}
2741
2742
2743/**
2744 * Tests if the guest is running in real or virtual 8086 mode.
2745 *
2746 * @returns @c true if it is, @c false if not.
2747 * @param pVCpu Pointer to the VMCPU.
2748 */
2749VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2750{
2751 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2752 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2753}
2754
2755
2756/**
2757 * Tests if the guest is running in protected mode or not.
2758 *
2759 * @returns true if in protected mode, otherwise false.
2760 * @param pVCpu Pointer to the VMCPU.
2761 */
2762VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2763{
2764 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2765}
2766
2767
2768/**
2769 * Tests if the guest is running in paged protected mode or not.
2770 *
2771 * @returns true if in paged protected mode, otherwise false.
2772 * @param pVCpu Pointer to the VMCPU.
2773 */
2774VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2775{
2776 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2777}
2778
2779
2780/**
2781 * Tests if the guest is running in long mode or not.
2782 *
2783 * @returns true if in long mode, otherwise false.
2784 * @param pVCpu Pointer to the VMCPU.
2785 */
2786VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2787{
2788 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2789}
2790
2791
2792/**
2793 * Tests if the guest is running in PAE mode or not.
2794 *
2795 * @returns true if in PAE mode, otherwise false.
2796 * @param pVCpu Pointer to the VMCPU.
2797 */
2798VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2799{
2800 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2801 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2802 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LME);
2803}
2804
2805
2806/**
2807 * Tests if the guest is running in 64-bit mode or not.
2808 *
2809 * @returns true if in 64-bit protected mode, otherwise false.
2810 * @param pVCpu The current virtual CPU.
2811 */
2812VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2813{
2814 if (!CPUMIsGuestInLongMode(pVCpu))
2815 return false;
2816 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2817 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2818}
2819
2820
2821/**
2822 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2823 * registers.
2824 *
2825 * @returns true if in 64-bit protected mode, otherwise false.
2826 * @param pCtx Pointer to the current guest CPU context.
2827 */
2828VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2829{
2830 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2831}
2832
2833#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2834
2835/**
2836 *
2837 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2838 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2839 * @param pVCpu The current virtual CPU.
2840 */
2841VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2842{
2843 return pVCpu->cpum.s.fRawEntered;
2844}
2845
2846/**
2847 * Transforms the guest CPU state to raw-ring mode.
2848 *
2849 * This function will change any of the CS and SS selectors with RPL=0 to RPL=1.
2850 *
2851 * @returns VBox status. (recompiler failure)
2852 * @param pVCpu Pointer to the VMCPU.
2853 * @param pCtxCore The context core (for trap usage).
2854 * @see @ref pg_raw
2855 */
2856VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2857{
2858 PVM pVM = pVCpu->CTX_SUFF(pVM);
2859
2860 Assert(!pVCpu->cpum.s.fRawEntered);
2861 Assert(!pVCpu->cpum.s.fRemEntered);
2862 if (!pCtxCore)
2863 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
2864
2865 /*
2866 * Are we in Ring-0?
2867 */
2868 if ( pCtxCore->ss.Sel
2869 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
2870 && !pCtxCore->eflags.Bits.u1VM)
2871 {
2872 /*
2873 * Enter execution mode.
2874 */
2875 PATMRawEnter(pVM, pCtxCore);
2876
2877 /*
2878 * Set CPL to Ring-1.
2879 */
2880 pCtxCore->ss.Sel |= 1;
2881 if ( pCtxCore->cs.Sel
2882 && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
2883 pCtxCore->cs.Sel |= 1;
2884 }
2885 else
2886 {
2887# ifdef VBOX_WITH_RAW_RING1
2888 if ( EMIsRawRing1Enabled(pVM)
2889 && !pCtxCore->eflags.Bits.u1VM
2890 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
2891 {
2892 /* Set CPL to Ring-2. */
2893 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
2894 if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
2895 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
2896 }
2897# else
2898 AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
2899 ("ring-1 code not supported\n"));
2900# endif
2901 /*
2902 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2903 */
2904 PATMRawEnter(pVM, pCtxCore);
2905 }
2906
2907 /*
2908 * Assert sanity.
2909 */
2910 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2911 AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0,
2912 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
2913 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
2914
2915 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
2916
2917 pVCpu->cpum.s.fRawEntered = true;
2918 return VINF_SUCCESS;
2919}
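/*
 * A minimal sketch of the ring compression performed above, using a
 * hypothetical guest ring-0 stack selector:
 *
 * @code
 *     uint16_t uSel = 0x0010;                 // guest SS with RPL=0
 *     if ((uSel & X86_SEL_RPL) == 0)
 *         uSel |= 1;                          // raw-mode executes it at RPL=1
 *     // uSel == 0x0011; CPUMRawLeave() strips the RPL again on the way out.
 * @endcode
 */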
2920
2921
2922/**
2923 * Transforms the guest CPU state from raw-ring mode to correct values.
2924 *
2925 * This function will change any selector registers with RPL=1 back to RPL=0.
2926 *
2927 * @returns Adjusted rc.
2928 * @param pVCpu Pointer to the VMCPU.
2929 * @param rc Raw mode return code
2930 * @param pCtxCore The context core (for trap usage).
2931 * @see @ref pg_raw
2932 */
2933VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
2934{
2935 PVM pVM = pVCpu->CTX_SUFF(pVM);
2936
2937 /*
2938 * Don't leave if we've already left (in RC).
2939 */
2940 Assert(!pVCpu->cpum.s.fRemEntered);
2941 if (!pVCpu->cpum.s.fRawEntered)
2942 return rc;
2943 pVCpu->cpum.s.fRawEntered = false;
2944
2945 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2946 if (!pCtxCore)
2947 pCtxCore = CPUMCTX2CORE(pCtx);
2948 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss.Sel & X86_SEL_RPL));
2949 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL),
2950 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
2951
2952 /*
2953 * Are we executing in raw ring-1?
2954 */
2955 if ( (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
2956 && !pCtxCore->eflags.Bits.u1VM)
2957 {
2958 /*
2959 * Leave execution mode.
2960 */
2961 PATMRawLeave(pVM, pCtxCore, rc);
2962 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2963 /** @todo See what happens if we remove this. */
2964 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
2965 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
2966 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
2967 pCtxCore->es.Sel &= ~X86_SEL_RPL;
2968 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
2969 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
2970 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
2971 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
2972
2973 /*
2974 * Ring-1 selector => Ring-0.
2975 */
2976 pCtxCore->ss.Sel &= ~X86_SEL_RPL;
2977 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
2978 pCtxCore->cs.Sel &= ~X86_SEL_RPL;
2979 }
2980 else
2981 {
2982 /*
2983 * PATM is taking care of the IOPL and IF flags for us.
2984 */
2985 PATMRawLeave(pVM, pCtxCore, rc);
2986 if (!pCtxCore->eflags.Bits.u1VM)
2987 {
2988# ifdef VBOX_WITH_RAW_RING1
2989 if ( EMIsRawRing1Enabled(pVM)
2990 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)
2991 {
2992 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2993 /** @todo See what happens if we remove this. */
2994 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 2)
2995 pCtxCore->ds.Sel = (pCtxCore->ds.Sel & ~X86_SEL_RPL) | 1;
2996 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 2)
2997 pCtxCore->es.Sel = (pCtxCore->es.Sel & ~X86_SEL_RPL) | 1;
2998 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 2)
2999 pCtxCore->fs.Sel = (pCtxCore->fs.Sel & ~X86_SEL_RPL) | 1;
3000 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 2)
3001 pCtxCore->gs.Sel = (pCtxCore->gs.Sel & ~X86_SEL_RPL) | 1;
3002
3003 /*
3004 * Ring-2 selector => Ring-1.
3005 */
3006 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 1;
3007 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 2)
3008 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 1;
3009 }
3010 else
3011 {
3012# endif
3013 /** @todo See what happens if we remove this. */
3014 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
3015 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
3016 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
3017 pCtxCore->es.Sel &= ~X86_SEL_RPL;
3018 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
3019 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
3020 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
3021 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
3022# ifdef VBOX_WITH_RAW_RING1
3023 }
3024# endif
3025 }
3026 }
3027
3028 return rc;
3029}
3030
3031#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3032
3033/**
3034 * Updates the EFLAGS while we're in raw-mode.
3035 *
3036 * @param pVCpu Pointer to the VMCPU.
3037 * @param fEfl The new EFLAGS value.
3038 */
3039VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
3040{
3041#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3042 if (pVCpu->cpum.s.fRawEntered)
3043 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest), fEfl);
3044 else
3045#endif
3046 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
3047}
3048
3049
3050/**
3051 * Gets the EFLAGS while we're in raw-mode.
3052 *
3053 * @returns The eflags.
3054 * @param pVCpu Pointer to the current virtual CPU.
3055 */
3056VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
3057{
3058#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3059 if (pVCpu->cpum.s.fRawEntered)
3060 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest));
3061#endif
3062 return pVCpu->cpum.s.Guest.eflags.u32;
3063}
3064
3065
3066/**
3067 * Sets the specified changed flags (CPUM_CHANGED_*).
3068 *
3069 * @param pVCpu Pointer to the current virtual CPU.
3070 */
3071VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
3072{
3073 pVCpu->cpum.s.fChanged |= fChangedFlags;
3074}
3075
3076
3077/**
3078 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
3079 * @returns true if supported.
3080 * @returns false if not supported.
3081 * @param pVM Pointer to the VM.
3082 */
3083VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
3084{
3085 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
3086}
3087
3088
3089/**
3090 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
3091 * @returns true if used.
3092 * @returns false if not used.
3093 * @param pVM Pointer to the VM.
3094 */
3095VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
3096{
3097 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
3098}
3099
3100
3101/**
3102 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
3103 * @returns true if used.
3104 * @returns false if not used.
3105 * @param pVM Pointer to the VM.
3106 */
3107VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
3108{
3109 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
3110}
3111
3112#ifdef IN_RC
3113
3114/**
3115 * Lazily sync in the FPU/XMM state.
3116 *
3117 * @returns VBox status code.
3118 * @param pVCpu Pointer to the VMCPU.
3119 */
3120VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
3121{
3122 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
3123}
3124
3125#endif /* IN_RC */
3126
3127/**
3128 * Checks if we activated the FPU/XMM state of the guest OS.
3129 * @returns true if we did.
3130 * @returns false if not.
3131 * @param pVCpu Pointer to the VMCPU.
3132 */
3133VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
3134{
3135 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
3136}
3137
3138
3139/**
3140 * Deactivate the FPU/XMM state of the guest OS.
3141 * @param pVCpu Pointer to the VMCPU.
3142 *
3143 * @todo r=bird: Why is this needed? Looks like a workaround for mishandled
3144 * FPU state management.
3145 */
3146VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
3147{
3148 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU));
3149 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
3150}
3151
3152
3153/**
3154 * Checks if the guest debug state is active.
3155 *
3156 * @returns boolean
3157 * @param pVCpu Pointer to the VMCPU.
3158 */
3159VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
3160{
3161 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
3162}
3163
3164
3165/**
3166 * Checks if the guest debug state is to be made active during the world-switch
3167 * (currently only used for the 32->64 switcher case).
3168 *
3169 * @returns boolean
3170 * @param pVCpu Pointer to the VMCPU.
3171 */
3172VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
3173{
3174 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
3175}
3176
3177
3178/**
3179 * Checks if the hyper debug state is active.
3180 *
3181 * @returns boolean
3182 * @param pVCpu Pointer to the VMCPU.
3183 */
3184VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
3185{
3186 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
3187}
3188
3189
3190/**
3191 * Checks if the hyper debug state is to be made active during the world-switch
3192 * (currently only used for the 32->64 switcher case).
3193 *
3194 * @returns boolean
3195 * @param pVCpu Pointer to the VMCPU.
3196 */
3197VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
3198{
3199 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
3200}
3201
3202
3203/**
3204 * Mark the guest's debug state as inactive.
3205 *
3206 * @returns boolean
3207 * @param pVCpu Pointer to the VMCPU.
3208 * @todo This API doesn't make sense any more.
3209 */
3210VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
3211{
3212 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
3213}
3214
3215
3216/**
3217 * Get the current privilege level of the guest.
3218 *
3219 * @returns CPL
3220 * @param pVCpu Pointer to the current virtual CPU.
3221 */
3222VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
3223{
3224 /*
3225 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
3226 *
3227 * Note! We used to check CS.DPL here, assuming it was always equal to
3228 * CPL even if a conforming segment was loaded. But this turned out to
3229 * only apply to older AMD-V. With VT-x we had an ACP2 regression
3230 * during install after a far call to ring 2 with VT-x. Then on newer
3231 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
3232 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
3233 *
3234 * So, forget CS.DPL, always use SS.DPL.
3235 *
3236 * Note! The SS RPL is always equal to the CPL, while the CS RPL
3237 * isn't necessarily equal if the segment is conforming.
3238 * See section 4.11.1 in the AMD manual.
3239 *
3240 * Update: Where the heck does it say CS.RPL can differ from CPL other than
3241 * right after real->prot mode switch and when in V8086 mode? That
3242 * section says the RPL specified in a direct transfer (call, jmp,
3243 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
3244 * it would be impossible for an exception handler or the iret
3245 * instruction to figure out whether SS:ESP are part of the frame
3246 * or not. A VBox or qemu bug must've led to this misconception.
3247 *
3248 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
3249 * selector into SS with an RPL other than the CPL when CPL != 3 and
3250 * we're in 64-bit mode. The intel dev box doesn't allow this, on
3251 * RPL = CPL. Weird.
3252 */
3253 uint32_t uCpl;
3254 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
3255 {
3256 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3257 {
3258 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
3259 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
3260 else
3261 {
3262 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
3263#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3264# ifdef VBOX_WITH_RAW_RING1
3265 if (pVCpu->cpum.s.fRawEntered)
3266 {
3267 if ( uCpl == 2
3268 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
3269 uCpl = 1;
3270 else if (uCpl == 1)
3271 uCpl = 0;
3272 }
3273 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
3274# else
3275 if (uCpl == 1)
3276 uCpl = 0;
3277# endif
3278#endif
3279 }
3280 }
3281 else
3282 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
3283 }
3284 else
3285 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
3286 return uCpl;
3287}
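/*
 * A minimal usage sketch, assuming a hypothetical instruction emulation path
 * that must refuse privileged operations unless the guest is at CPL 0:
 *
 * @code
 *     if (CPUMGetGuestCPL(pVCpu) != 0)
 *     {
 *         // A privileged instruction at CPL > 0 would have to raise #GP(0) in the guest.
 *     }
 * @endcode
 */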
3288
3289
3290/**
3291 * Gets the current guest CPU mode.
3292 *
3293 * If paging mode is what you need, check out PGMGetGuestMode().
3294 *
3295 * @returns The CPU mode.
3296 * @param pVCpu Pointer to the VMCPU.
3297 */
3298VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
3299{
3300 CPUMMODE enmMode;
3301 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3302 enmMode = CPUMMODE_REAL;
3303 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3304 enmMode = CPUMMODE_PROTECTED;
3305 else
3306 enmMode = CPUMMODE_LONG;
3307
3308 return enmMode;
3309}
3310
3311
3312/**
3313 * Figures out whether the CPU is currently executing 16-, 32- or 64-bit code.
3314 *
3315 * @returns 16, 32 or 64.
3316 * @param pVCpu The current virtual CPU.
3317 */
3318VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
3319{
3320 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3321 return 16;
3322
3323 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3324 {
3325 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
3326 return 16;
3327 }
3328
3329 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
3330 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
3331 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3332 return 64;
3333
3334 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
3335 return 32;
3336
3337 return 16;
3338}
3339
3340
3341VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
3342{
3343 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
3344 return DISCPUMODE_16BIT;
3345
3346 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
3347 {
3348 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
3349 return DISCPUMODE_16BIT;
3350 }
3351
3352 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
3353 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
3354 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
3355 return DISCPUMODE_64BIT;
3356
3357 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
3358 return DISCPUMODE_32BIT;
3359
3360 return DISCPUMODE_16BIT;
3361}
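/*
 * A minimal sketch relating the two helpers above (hypothetical caller); both
 * derive the answer from CR0.PE, EFLAGS.VM, EFER.LMA and the CS L/D bits:
 *
 * @code
 *     uint32_t   const cBits   = CPUMGetGuestCodeBits(pVCpu);   // 16, 32 or 64
 *     DISCPUMODE const enmMode = CPUMGetGuestDisMode(pVCpu);
 *     Assert(   (cBits == 16 && enmMode == DISCPUMODE_16BIT)
 *            || (cBits == 32 && enmMode == DISCPUMODE_32BIT)
 *            || (cBits == 64 && enmMode == DISCPUMODE_64BIT));
 * @endcode
 */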
3362