VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@55000

Last change on this file since 55000 was 55000, checked in by vboxsync, 10 years ago

CPUMCTXCORE elimination.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 80.3 KB
1/* $Id: CPUMAllRegs.cpp 55000 2015-03-29 16:42:16Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable frame-pointer omission here (i.e. keep stack frame pointers). */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52
53/*******************************************************************************
54* Defined Constants And Macros *
55*******************************************************************************/
56/**
57 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
58 *
59 * @returns Pointer to the Virtual CPU.
60 * @param a_pGuestCtx Pointer to the guest context.
61 */
62#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
63
64/**
65 * Lazily loads the hidden parts of a selector register when using raw-mode.
66 */
67#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
68# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
69 do \
70 { \
71 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
72 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
73 } while (0)
74#else
75# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
76 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
77#endif
78
79
80
81#ifdef VBOX_WITH_RAW_MODE_NOT_R0
82
83/**
84 * Does the lazy hidden selector register loading.
85 *
86 * @param pVCpu The current Virtual CPU.
87 * @param pSReg The selector register to lazily load hidden parts of.
88 */
89static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
90{
91 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
92 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
93 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
94
95 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
96 {
97 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
98 pSReg->Attr.u = 0;
99 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
100 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
101 pSReg->Attr.n.u2Dpl = 3;
102 pSReg->Attr.n.u1Present = 1;
103 pSReg->u32Limit = 0x0000ffff;
104 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
105 pSReg->ValidSel = pSReg->Sel;
106 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
107 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
108 }
109 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
110 {
111 /* Real mode - leave the limit and flags alone here, at least for now. */
112 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
113 pSReg->ValidSel = pSReg->Sel;
114 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
115 }
116 else
117 {
118 /* Protected mode - get it from the selector descriptor tables. */
119 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
120 {
121 Assert(!CPUMIsGuestInLongMode(pVCpu));
122 pSReg->Sel = 0;
123 pSReg->u64Base = 0;
124 pSReg->u32Limit = 0;
125 pSReg->Attr.u = 0;
126 pSReg->ValidSel = 0;
127 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
128 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
129 }
130 else
131 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
132 }
133}
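/* Editorial note (not part of the original file): in the V8086 and real-mode
   branches above the hidden base is simply the selector shifted left by four.
   For example, a selector of 0x1234 gives:
       pSReg->u64Base = (uint32_t)0x1234 << 4;   // == 0x00012340
   so segment:offset 1234:0010 addresses linear 0x00012350. */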
134
135
136/**
137 * Makes sure the hidden CS and SS selector registers are valid, loading them if
138 * necessary.
139 *
140 * @param pVCpu The current virtual CPU.
141 */
142VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
143{
144 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
145 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
146}
147
148
149/**
150 * Loads the hidden parts of a selector register.
151 * @param pVCpu The current virtual CPU.
152 * @param pSReg The selector register to load the hidden parts of.
153 */
154VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
155{
156 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
157}
158
159#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
160
161
162/**
163 * Obsolete.
164 *
165 * We don't support nested hypervisor context interrupts or traps. Life is much
166 * simpler when we don't. It's also slightly faster at times.
167 *
168 * @param pVCpu Handle to the virtual CPU.
169 */
170VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
171{
172 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
173}
174
175
176/**
177 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
178 *
179 * @param pVCpu Pointer to the VMCPU.
180 */
181VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
182{
183 return &pVCpu->cpum.s.Hyper;
184}
185
186
187VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
188{
189 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
190 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
191}
192
193
194VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
195{
196 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
197 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
198}
199
200
201VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
202{
203 pVCpu->cpum.s.Hyper.cr3 = cr3;
204
205#ifdef IN_RC
206 /* Update the current CR3. */
207 ASMSetCR3(cr3);
208#endif
209}
210
211VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
212{
213 return pVCpu->cpum.s.Hyper.cr3;
214}
215
216
217VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
218{
219 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
220}
221
222
223VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
224{
225 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
226}
227
228
229VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
230{
231 pVCpu->cpum.s.Hyper.es.Sel = SelES;
232}
233
234
235VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
236{
237 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
238}
239
240
241VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
242{
243 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
244}
245
246
247VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
248{
249 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
250}
251
252
253VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
254{
255 pVCpu->cpum.s.Hyper.esp = u32ESP;
256}
257
258
259VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
260{
261 pVCpu->cpum.s.Hyper.edx = u32EDX;
262}
263
264
265VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
266{
267 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
268 return VINF_SUCCESS;
269}
270
271
272VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
273{
274 pVCpu->cpum.s.Hyper.eip = u32EIP;
275}
276
277
278/**
279 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
280 * EFLAGS and EIP prior to resuming guest execution.
281 *
282 * All general registers not given as parameters will be set to 0. The EFLAGS
283 * register will be set to sane values for C/C++ code execution with interrupts
284 * disabled and IOPL 0.
285 *
286 * @param pVCpu The current virtual CPU.
287 * @param u32EIP The EIP value.
288 * @param u32ESP The ESP value.
289 * @param u32EAX The EAX value.
290 * @param u32EDX The EDX value.
291 */
292VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
293{
294 pVCpu->cpum.s.Hyper.eip = u32EIP;
295 pVCpu->cpum.s.Hyper.esp = u32ESP;
296 pVCpu->cpum.s.Hyper.eax = u32EAX;
297 pVCpu->cpum.s.Hyper.edx = u32EDX;
298 pVCpu->cpum.s.Hyper.ecx = 0;
299 pVCpu->cpum.s.Hyper.ebx = 0;
300 pVCpu->cpum.s.Hyper.ebp = 0;
301 pVCpu->cpum.s.Hyper.esi = 0;
302 pVCpu->cpum.s.Hyper.edi = 0;
303 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
304}
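/* Usage sketch (editorial addition, hedged): a raw-mode resume path might
   reinitialise the hypervisor context like this before the world switch.
   GCPtrEntry and GCPtrStackTop are hypothetical variables holding the raw-mode
   entry point and stack top:
   @code
       CPUMSetHyperState(pVCpu, GCPtrEntry, GCPtrStackTop, 0, 0);
       Assert(CPUMGetHyperEIP(pVCpu) == GCPtrEntry);
   @endcode */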
305
306
307VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
308{
309 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
310}
311
312
313VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
314{
315 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
316}
317
318
319/** @def MAYBE_LOAD_DRx
320 * Macro for updating DRx values in raw-mode and ring-0 contexts.
321 */
322#ifdef IN_RING0
323# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
324# ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
325# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
326 do { \
327 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
328 a_fnLoad(a_uValue); \
329 else \
330 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
331 } while (0)
332# else
333# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
334 do { \
335 /** @todo we're not loading the correct guest value here! */ \
336 a_fnLoad(a_uValue); \
337 } while (0)
338# endif
339# else
340# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
341 do { \
342 a_fnLoad(a_uValue); \
343 } while (0)
344# endif
345
346#elif defined(IN_RC)
347# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
348 do { \
349 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
350 { a_fnLoad(a_uValue); } \
351 } while (0)
352
353#else
354# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
355#endif
356
357VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
358{
359 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
360 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
361}
362
363
364VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
365{
366 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
367 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
368}
369
370
371VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
372{
373 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
374 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
375}
376
377
378VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
379{
380 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
381 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
382}
383
384
385VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
386{
387 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
388}
389
390
391VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
392{
393 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
394#ifdef IN_RC
395 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
396#endif
397}
398
399
400VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
401{
402 return pVCpu->cpum.s.Hyper.cs.Sel;
403}
404
405
406VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
407{
408 return pVCpu->cpum.s.Hyper.ds.Sel;
409}
410
411
412VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
413{
414 return pVCpu->cpum.s.Hyper.es.Sel;
415}
416
417
418VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
419{
420 return pVCpu->cpum.s.Hyper.fs.Sel;
421}
422
423
424VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
425{
426 return pVCpu->cpum.s.Hyper.gs.Sel;
427}
428
429
430VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
431{
432 return pVCpu->cpum.s.Hyper.ss.Sel;
433}
434
435
436VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
437{
438 return pVCpu->cpum.s.Hyper.eax;
439}
440
441
442VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
443{
444 return pVCpu->cpum.s.Hyper.ebx;
445}
446
447
448VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
449{
450 return pVCpu->cpum.s.Hyper.ecx;
451}
452
453
454VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
455{
456 return pVCpu->cpum.s.Hyper.edx;
457}
458
459
460VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
461{
462 return pVCpu->cpum.s.Hyper.esi;
463}
464
465
466VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
467{
468 return pVCpu->cpum.s.Hyper.edi;
469}
470
471
472VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
473{
474 return pVCpu->cpum.s.Hyper.ebp;
475}
476
477
478VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
479{
480 return pVCpu->cpum.s.Hyper.esp;
481}
482
483
484VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
485{
486 return pVCpu->cpum.s.Hyper.eflags.u32;
487}
488
489
490VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
491{
492 return pVCpu->cpum.s.Hyper.eip;
493}
494
495
496VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
497{
498 return pVCpu->cpum.s.Hyper.rip;
499}
500
501
502VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
503{
504 if (pcbLimit)
505 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
506 return pVCpu->cpum.s.Hyper.idtr.pIdt;
507}
508
509
510VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
511{
512 if (pcbLimit)
513 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
514 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
515}
516
517
518VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
519{
520 return pVCpu->cpum.s.Hyper.ldtr.Sel;
521}
522
523
524VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
525{
526 return pVCpu->cpum.s.Hyper.dr[0];
527}
528
529
530VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
531{
532 return pVCpu->cpum.s.Hyper.dr[1];
533}
534
535
536VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
537{
538 return pVCpu->cpum.s.Hyper.dr[2];
539}
540
541
542VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
543{
544 return pVCpu->cpum.s.Hyper.dr[3];
545}
546
547
548VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
549{
550 return pVCpu->cpum.s.Hyper.dr[6];
551}
552
553
554VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
555{
556 return pVCpu->cpum.s.Hyper.dr[7];
557}
558
559
560/**
561 * Gets the pointer to the internal CPUMCTXCORE structure.
562 * This is only for reading in order to save a few calls.
563 *
564 * @param pVCpu Handle to the virtual cpu.
565 */
566VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
567{
568 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
569}
570
571
572/**
573 * Queries the pointer to the internal CPUMCTX structure.
574 *
575 * @returns The CPUMCTX pointer.
576 * @param pVCpu Handle to the virtual cpu.
577 */
578VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
579{
580 return &pVCpu->cpum.s.Guest;
581}
582
583VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
584{
585#ifdef VBOX_WITH_IEM
586# ifdef VBOX_WITH_RAW_MODE_NOT_R0
587 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
588 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
589# endif
590#endif
591 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
592 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
593 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
594 return VINF_SUCCESS; /* formality, consider it void. */
595}
596
597VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
598{
599#ifdef VBOX_WITH_IEM
600# ifdef VBOX_WITH_RAW_MODE_NOT_R0
601 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
602 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
603# endif
604#endif
605 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
606 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
607 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
608 return VINF_SUCCESS; /* formality, consider it void. */
609}
610
611VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
612{
613#ifdef VBOX_WITH_IEM
614# ifdef VBOX_WITH_RAW_MODE_NOT_R0
615 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
616 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
617# endif
618#endif
619 pVCpu->cpum.s.Guest.tr.Sel = tr;
620 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
621 return VINF_SUCCESS; /* formality, consider it void. */
622}
623
624VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
625{
626#ifdef VBOX_WITH_IEM
627# ifdef VBOX_WITH_RAW_MODE_NOT_R0
628 if ( ( ldtr != 0
629 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
630 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
631 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
632# endif
633#endif
634 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
635 /* The caller will set more hidden bits if it has them. */
636 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
637 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
638 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
639 return VINF_SUCCESS; /* formality, consider it void. */
640}
641
642
643/**
644 * Set the guest CR0.
645 *
646 * When called in GC, the hyper CR0 may be updated if that is
647 * required. The caller only has to take special action if AM,
648 * WP, PG or PE changes.
649 *
650 * @returns VINF_SUCCESS (consider it void).
651 * @param pVCpu Handle to the virtual cpu.
652 * @param cr0 The new CR0 value.
653 */
654VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
655{
656#ifdef IN_RC
657 /*
658 * Check if we need to change hypervisor CR0 because
659 * of math stuff.
660 */
661 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
662 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
663 {
664 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
665 {
666 /*
667 * We haven't saved the host FPU state yet, so TS and MT are both set
668 * and EM should be reflecting the guest EM (it always does this).
669 */
670 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
671 {
672 uint32_t HyperCR0 = ASMGetCR0();
673 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
674 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
675 HyperCR0 &= ~X86_CR0_EM;
676 HyperCR0 |= cr0 & X86_CR0_EM;
677 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
678 ASMSetCR0(HyperCR0);
679 }
680# ifdef VBOX_STRICT
681 else
682 {
683 uint32_t HyperCR0 = ASMGetCR0();
684 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
685 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
686 }
687# endif
688 }
689 else
690 {
691 /*
692 * Already saved the state, so we're just mirroring
693 * the guest flags.
694 */
695 uint32_t HyperCR0 = ASMGetCR0();
696 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
697 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
698 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
699 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
700 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
701 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
702 ASMSetCR0(HyperCR0);
703 }
704 }
705#endif /* IN_RC */
706
707 /*
708 * Check for changes causing TLB flushes (for REM).
709 * The caller is responsible for calling PGM when appropriate.
710 */
711 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
712 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
713 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
714 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
715
716 /*
717 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
718 */
719 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
720 PGMCr0WpEnabled(pVCpu);
721
722 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
723 return VINF_SUCCESS;
724}
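/* Worked example (editorial sketch): only PG, WP and PE transitions mark the
   context for a global TLB flush; toggling e.g. the TS bit does not.
   @code
       uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
       CPUMSetGuestCR0(pVCpu, cr0 ^ X86_CR0_TS);   // no CPUM_CHANGED_GLOBAL_TLB_FLUSH
       CPUMSetGuestCR0(pVCpu, cr0 | X86_CR0_WP);   // flags a flush if WP was clear
   @endcode
   Note that the stored value always gets X86_CR0_ET forced on. */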
725
726
727VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
728{
729 pVCpu->cpum.s.Guest.cr2 = cr2;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
735{
736 pVCpu->cpum.s.Guest.cr3 = cr3;
737 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
738 return VINF_SUCCESS;
739}
740
741
742VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
743{
744 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
745 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
746 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
747 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
748 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
749 cr4 &= ~X86_CR4_OSFXSR;
750 pVCpu->cpum.s.Guest.cr4 = cr4;
751 return VINF_SUCCESS;
752}
753
754
755VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
756{
757 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
758 return VINF_SUCCESS;
759}
760
761
762VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
763{
764 pVCpu->cpum.s.Guest.eip = eip;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
770{
771 pVCpu->cpum.s.Guest.eax = eax;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
777{
778 pVCpu->cpum.s.Guest.ebx = ebx;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
784{
785 pVCpu->cpum.s.Guest.ecx = ecx;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
791{
792 pVCpu->cpum.s.Guest.edx = edx;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
798{
799 pVCpu->cpum.s.Guest.esp = esp;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
805{
806 pVCpu->cpum.s.Guest.ebp = ebp;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
812{
813 pVCpu->cpum.s.Guest.esi = esi;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
819{
820 pVCpu->cpum.s.Guest.edi = edi;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
826{
827 pVCpu->cpum.s.Guest.ss.Sel = ss;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
833{
834 pVCpu->cpum.s.Guest.cs.Sel = cs;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
840{
841 pVCpu->cpum.s.Guest.ds.Sel = ds;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
847{
848 pVCpu->cpum.s.Guest.es.Sel = es;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
854{
855 pVCpu->cpum.s.Guest.fs.Sel = fs;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
861{
862 pVCpu->cpum.s.Guest.gs.Sel = gs;
863 return VINF_SUCCESS;
864}
865
866
867VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
868{
869 pVCpu->cpum.s.Guest.msrEFER = val;
870}
871
872
873VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
874{
875 if (pcbLimit)
876 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
877 return pVCpu->cpum.s.Guest.idtr.pIdt;
878}
879
880
881VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
882{
883 if (pHidden)
884 *pHidden = pVCpu->cpum.s.Guest.tr;
885 return pVCpu->cpum.s.Guest.tr.Sel;
886}
887
888
889VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
890{
891 return pVCpu->cpum.s.Guest.cs.Sel;
892}
893
894
895VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
896{
897 return pVCpu->cpum.s.Guest.ds.Sel;
898}
899
900
901VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
902{
903 return pVCpu->cpum.s.Guest.es.Sel;
904}
905
906
907VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
908{
909 return pVCpu->cpum.s.Guest.fs.Sel;
910}
911
912
913VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
914{
915 return pVCpu->cpum.s.Guest.gs.Sel;
916}
917
918
919VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
920{
921 return pVCpu->cpum.s.Guest.ss.Sel;
922}
923
924
925VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
926{
927 return pVCpu->cpum.s.Guest.ldtr.Sel;
928}
929
930
931VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
932{
933 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
934 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
935 return pVCpu->cpum.s.Guest.ldtr.Sel;
936}
937
938
939VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
940{
941 return pVCpu->cpum.s.Guest.cr0;
942}
943
944
945VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
946{
947 return pVCpu->cpum.s.Guest.cr2;
948}
949
950
951VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
952{
953 return pVCpu->cpum.s.Guest.cr3;
954}
955
956
957VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
958{
959 return pVCpu->cpum.s.Guest.cr4;
960}
961
962
963VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
964{
965 uint64_t u64;
966 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
967 if (RT_FAILURE(rc))
968 u64 = 0;
969 return u64;
970}
971
972
973VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
974{
975 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
976}
977
978
979VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
980{
981 return pVCpu->cpum.s.Guest.eip;
982}
983
984
985VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
986{
987 return pVCpu->cpum.s.Guest.rip;
988}
989
990
991VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
992{
993 return pVCpu->cpum.s.Guest.eax;
994}
995
996
997VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
998{
999 return pVCpu->cpum.s.Guest.ebx;
1000}
1001
1002
1003VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1004{
1005 return pVCpu->cpum.s.Guest.ecx;
1006}
1007
1008
1009VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1010{
1011 return pVCpu->cpum.s.Guest.edx;
1012}
1013
1014
1015VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1016{
1017 return pVCpu->cpum.s.Guest.esi;
1018}
1019
1020
1021VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1022{
1023 return pVCpu->cpum.s.Guest.edi;
1024}
1025
1026
1027VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1028{
1029 return pVCpu->cpum.s.Guest.esp;
1030}
1031
1032
1033VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1034{
1035 return pVCpu->cpum.s.Guest.ebp;
1036}
1037
1038
1039VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1040{
1041 return pVCpu->cpum.s.Guest.eflags.u32;
1042}
1043
1044
1045VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1046{
1047 switch (iReg)
1048 {
1049 case DISCREG_CR0:
1050 *pValue = pVCpu->cpum.s.Guest.cr0;
1051 break;
1052
1053 case DISCREG_CR2:
1054 *pValue = pVCpu->cpum.s.Guest.cr2;
1055 break;
1056
1057 case DISCREG_CR3:
1058 *pValue = pVCpu->cpum.s.Guest.cr3;
1059 break;
1060
1061 case DISCREG_CR4:
1062 *pValue = pVCpu->cpum.s.Guest.cr4;
1063 break;
1064
1065 case DISCREG_CR8:
1066 {
1067 uint8_t u8Tpr;
1068 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1069 if (RT_FAILURE(rc))
1070 {
1071 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1072 *pValue = 0;
1073 return rc;
1074 }
1075 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes in CR8; bits 3-0 are not part of CR8. */
1076 break;
1077 }
1078
1079 default:
1080 return VERR_INVALID_PARAMETER;
1081 }
1082 return VINF_SUCCESS;
1083}
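/* Editorial note: CR8 is architecturally the upper nibble of the 8-bit APIC TPR,
   which is why the DISCREG_CR8 case above returns u8Tpr >> 4.  E.g. a TPR of
   0xB0 reported by PDMApicGetTPR() yields a CR8 value of 0xB. */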
1084
1085
1086VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1087{
1088 return pVCpu->cpum.s.Guest.dr[0];
1089}
1090
1091
1092VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1093{
1094 return pVCpu->cpum.s.Guest.dr[1];
1095}
1096
1097
1098VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1099{
1100 return pVCpu->cpum.s.Guest.dr[2];
1101}
1102
1103
1104VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1105{
1106 return pVCpu->cpum.s.Guest.dr[3];
1107}
1108
1109
1110VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1111{
1112 return pVCpu->cpum.s.Guest.dr[6];
1113}
1114
1115
1116VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1117{
1118 return pVCpu->cpum.s.Guest.dr[7];
1119}
1120
1121
1122VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1123{
1124 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1125 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1126 if (iReg == 4 || iReg == 5)
1127 iReg += 2;
1128 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1129 return VINF_SUCCESS;
1130}
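/* Editorial sketch: DR4 and DR5 are treated as legacy aliases of DR6 and DR7,
   so (assuming DISDREG_DR5 == 5) the following reads the guest DR7:
   @code
       uint64_t uValue;
       int rc = CPUMGetGuestDRx(pVCpu, DISDREG_DR5, &uValue);  // uValue = dr[7]
   @endcode */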
1131
1132
1133VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1134{
1135 return pVCpu->cpum.s.Guest.msrEFER;
1136}
1137
1138
1139/**
1140 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1141 *
1142 * @returns Pointer to the leaf if found, NULL if not.
1143 *
1144 * @param pVM Pointer to the cross context VM structure.
1145 * @param uLeaf The leaf to get.
1146 */
1147PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1148{
1149 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1150 if (iEnd)
1151 {
1152 unsigned iStart = 0;
1153 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1154 for (;;)
1155 {
1156 unsigned i = iStart + (iEnd - iStart) / 2U;
1157 if (uLeaf < paLeaves[i].uLeaf)
1158 {
1159 if (i <= iStart)
1160 return NULL;
1161 iEnd = i;
1162 }
1163 else if (uLeaf > paLeaves[i].uLeaf)
1164 {
1165 i += 1;
1166 if (i >= iEnd)
1167 return NULL;
1168 iStart = i;
1169 }
1170 else
1171 {
1172 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1173 return &paLeaves[i];
1174
1175 /* This shouldn't normally happen. But in case it does due
1176 to user configuration overrides or something, just return the
1177 first sub-leaf. */
1178 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1179 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1180 while ( paLeaves[i].uSubLeaf != 0
1181 && i > 0
1182 && uLeaf == paLeaves[i - 1].uLeaf)
1183 i--;
1184 return &paLeaves[i];
1185 }
1186 }
1187 }
1188
1189 return NULL;
1190}
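/* Editorial sketch: the leaf array is sorted, so the loop above is a plain binary
   search that only returns entries without sub-leaves.  Typical use, mirroring the
   feature patching code further down in this file:
   @code
       PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
       if (pLeaf)
           pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;  // patch the std feature leaf
   @endcode */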
1191
1192
1193/**
1194 * Looks up a CPUID leaf in the CPUID leaf array.
1195 *
1196 * @returns Pointer to the leaf if found, NULL if not.
1197 *
1198 * @param pVM Pointer to the cross context VM structure.
1199 * @param uLeaf The leaf to get.
1200 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1201 * isn't.
1202 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1203 */
1204PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1205{
1206 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1207 if (iEnd)
1208 {
1209 unsigned iStart = 0;
1210 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1211 for (;;)
1212 {
1213 unsigned i = iStart + (iEnd - iStart) / 2U;
1214 if (uLeaf < paLeaves[i].uLeaf)
1215 {
1216 if (i <= iStart)
1217 return NULL;
1218 iEnd = i;
1219 }
1220 else if (uLeaf > paLeaves[i].uLeaf)
1221 {
1222 i += 1;
1223 if (i >= iEnd)
1224 return NULL;
1225 iStart = i;
1226 }
1227 else
1228 {
1229 uSubLeaf &= paLeaves[i].fSubLeafMask;
1230 if (uSubLeaf == paLeaves[i].uSubLeaf)
1231 *pfExactSubLeafHit = true;
1232 else
1233 {
1234 /* Find the right subleaf. We return the last one before
1235 uSubLeaf if we don't find an exact match. */
1236 if (uSubLeaf < paLeaves[i].uSubLeaf)
1237 while ( i > 0
1238 && uLeaf == paLeaves[i - 1].uLeaf
1239 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1240 i--;
1241 else
1242 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1243 && uLeaf == paLeaves[i + 1].uLeaf
1244 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1245 i++;
1246 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1247 }
1248 return &paLeaves[i];
1249 }
1250 }
1251 }
1252
1253 *pfExactSubLeafHit = false;
1254 return NULL;
1255}
1256
1257
1258/**
1259 * Gets a CPUID leaf.
1260 *
1261 * @param pVCpu Pointer to the VMCPU.
1262 * @param uLeaf The CPUID leaf to get.
1263 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1264 * @param pEax Where to store the EAX value.
1265 * @param pEbx Where to store the EBX value.
1266 * @param pEcx Where to store the ECX value.
1267 * @param pEdx Where to store the EDX value.
1268 */
1269VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1270 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1271{
1272 bool fExactSubLeafHit;
1273 PVM pVM = pVCpu->CTX_SUFF(pVM);
1274 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1275 if (pLeaf)
1276 {
1277 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1278 if (fExactSubLeafHit)
1279 {
1280 *pEax = pLeaf->uEax;
1281 *pEbx = pLeaf->uEbx;
1282 *pEcx = pLeaf->uEcx;
1283 *pEdx = pLeaf->uEdx;
1284
1285 /*
1286 * Deal with CPU specific information (currently only APIC ID).
1287 */
1288 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC_ID)
1289 {
1290 if (uLeaf == 1)
1291 {
1292 /* Bits 31-24: Initial APIC ID */
1293 Assert(pVCpu->idCpu <= 255);
1294 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1295 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1296 }
1297 else if (uLeaf == 0xb)
1298 {
1299 /* EDX: Initial extended APIC ID. */
1300 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1301 *pEdx = pVCpu->idCpu;
1302 }
1303 else if (uLeaf == UINT32_C(0x8000001e))
1304 {
1305 /* EAX: Initial extended APIC ID. */
1306 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1307 *pEax = pVCpu->idCpu;
1308 }
1309 else
1310 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1311 }
1312 }
1313 /*
1314 * Out-of-range sub-leaves aren't quite as easy and pretty to emulate
1315 * as in-range ones, but we do the best we can here...
1316 */
1317 else
1318 {
1319 *pEax = *pEbx = *pEcx = *pEdx = 0;
1320 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1321 {
1322 *pEcx = uSubLeaf & 0xff;
1323 *pEdx = pVCpu->idCpu;
1324 }
1325 }
1326 }
1327 else
1328 {
1329 /*
1330 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1331 */
1332 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1333 {
1334 default:
1335 AssertFailed();
1336 case CPUMUNKNOWNCPUID_DEFAULTS:
1337 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1338 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1339 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1340 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1341 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1342 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1343 break;
1344 case CPUMUNKNOWNCPUID_PASSTHRU:
1345 *pEax = uLeaf;
1346 *pEbx = 0;
1347 *pEcx = uSubLeaf;
1348 *pEdx = 0;
1349 break;
1350 }
1351 }
1352 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1353}
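/* Usage sketch (editorial): reading the standard feature leaf; for leaf 1 the
   function above patches the initial APIC ID into EBX bits 31-24:
   @code
       uint32_t uEax, uEbx, uEcx, uEdx;
       CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
       uint8_t const idApic = (uint8_t)(uEbx >> 24);   // == pVCpu->idCpu here
   @endcode */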
1354
1355
1356/**
1357 * Sets a CPUID feature bit.
1358 *
1359 * @param pVM Pointer to the VM.
1360 * @param enmFeature The feature to set.
1361 */
1362VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1363{
1364 PCPUMCPUIDLEAF pLeaf;
1365
1366 switch (enmFeature)
1367 {
1368 /*
1369 * Set the APIC bit in both feature masks.
1370 */
1371 case CPUMCPUIDFEATURE_APIC:
1372 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1373 if (pLeaf)
1374 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
1375
1376 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1377 if ( pLeaf
1378 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1379 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1380
1381 pVM->cpum.s.GuestFeatures.fApic = 1;
1382 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n"));
1383 break;
1384
1385 /*
1386 * Set the x2APIC bit in the standard feature mask.
1387 */
1388 case CPUMCPUIDFEATURE_X2APIC:
1389 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1390 if (pLeaf)
1391 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
1392 pVM->cpum.s.GuestFeatures.fX2Apic = 1;
1393 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
1394 break;
1395
1396 /*
1397 * Set the sysenter/sysexit bit in the standard feature mask.
1398 * Assumes the caller knows what it's doing! (host must support these)
1399 */
1400 case CPUMCPUIDFEATURE_SEP:
1401 if (!pVM->cpum.s.HostFeatures.fSysEnter)
1402 {
1403 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1404 return;
1405 }
1406
1407 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1408 if (pLeaf)
1409 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
1410 pVM->cpum.s.GuestFeatures.fSysEnter = 1;
1411 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
1412 break;
1413
1414 /*
1415 * Set the syscall/sysret bit in the extended feature mask.
1416 * Assumes the caller knows what it's doing! (host must support these)
1417 */
1418 case CPUMCPUIDFEATURE_SYSCALL:
1419 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1420 if ( !pLeaf
1421 || !pVM->cpum.s.HostFeatures.fSysCall)
1422 {
1423#if HC_ARCH_BITS == 32
1424 /* Intel doesn't seem to set X86_CPUID_EXT_FEATURE_EDX_SYSCALL in
1425 32-bit mode, even when the CPU is capable of doing so in
1426 64-bit mode. Long mode requires syscall support. */
1427 if (!pVM->cpum.s.HostFeatures.fLongMode)
1428#endif
1429 {
1430 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
1431 return;
1432 }
1433 }
1434
1435 /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
1436 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
1437 pVM->cpum.s.GuestFeatures.fSysCall = 1;
1438 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
1439 break;
1440
1441 /*
1442 * Set the PAE bit in both feature masks.
1443 * Assumes the caller knows what it's doing! (host must support these)
1444 */
1445 case CPUMCPUIDFEATURE_PAE:
1446 if (!pVM->cpum.s.HostFeatures.fPae)
1447 {
1448 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
1449 return;
1450 }
1451
1452 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1453 if (pLeaf)
1454 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
1455
1456 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1457 if ( pLeaf
1458 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1459 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1460
1461 pVM->cpum.s.GuestFeatures.fPae = 1;
1462 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
1463 break;
1464
1465 /*
1466 * Set the LONG MODE bit in the extended feature mask.
1467 * Assumes the caller knows what it's doing! (host must support these)
1468 */
1469 case CPUMCPUIDFEATURE_LONG_MODE:
1470 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1471 if ( !pLeaf
1472 || !pVM->cpum.s.HostFeatures.fLongMode)
1473 {
1474 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
1475 return;
1476 }
1477
1478 /* Valid for both Intel and AMD. */
1479 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1480 pVM->cpum.s.GuestFeatures.fLongMode = 1;
1481 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
1482 break;
1483
1484 /*
1485 * Set the NX/XD bit in the extended feature mask.
1486 * Assumes the caller knows what it's doing! (host must support these)
1487 */
1488 case CPUMCPUIDFEATURE_NX:
1489 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1490 if ( !pLeaf
1491 || !pVM->cpum.s.HostFeatures.fNoExecute)
1492 {
1493 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
1494 return;
1495 }
1496
1497 /* Valid for both Intel and AMD. */
1498 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
1499 pVM->cpum.s.GuestFeatures.fNoExecute = 1;
1500 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
1501 break;
1502
1503
1504 /*
1505 * Set the LAHF/SAHF support in 64-bit mode.
1506 * Assumes the caller knows what it's doing! (host must support this)
1507 */
1508 case CPUMCPUIDFEATURE_LAHF:
1509 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1510 if ( !pLeaf
1511 || !pVM->cpum.s.HostFeatures.fLahfSahf)
1512 {
1513 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
1514 return;
1515 }
1516
1517 /* Valid for both Intel and AMD. */
1518 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1519 pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
1520 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1521 break;
1522
1523 /*
1524 * Set the page attribute table bit. This is an alternative page-level
1525 * cache control that doesn't matter much when everything is
1526 * virtualized, though it may when passing through device memory.
1527 */
1528 case CPUMCPUIDFEATURE_PAT:
1529 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1530 if (pLeaf)
1531 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
1532
1533 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1534 if ( pLeaf
1535 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1536 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1537
1538 pVM->cpum.s.GuestFeatures.fPat = 1;
1539 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
1540 break;
1541
1542 /*
1543 * Set the RDTSCP support bit.
1544 * Assumes the caller knows what it's doing! (host must support this)
1545 */
1546 case CPUMCPUIDFEATURE_RDTSCP:
1547 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1548 if ( !pLeaf
1549 || !pVM->cpum.s.HostFeatures.fRdTscP
1550 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1551 {
1552 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1553 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
1554 return;
1555 }
1556
1557 /* Valid for both Intel and AMD. */
1558 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1559 pVM->cpum.s.GuestFeatures.fRdTscP = 1;
1560 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1561 break;
1562
1563 /*
1564 * Set the Hypervisor Present bit in the standard feature mask.
1565 */
1566 case CPUMCPUIDFEATURE_HVP:
1567 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1568 if (pLeaf)
1569 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
1570 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
1571 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
1572 break;
1573
1574 /*
1575 * Set the MWAIT Extensions Present bit in the MWAIT/MONITOR leaf.
1576 * This currently includes the Present bit and MWAITBREAK bit as well.
1577 */
1578 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1579 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005));
1580 if ( !pLeaf
1581 || !pVM->cpum.s.HostFeatures.fMWaitExtensions)
1582 {
1583 LogRel(("CPUM: WARNING! Can't turn on MWAIT Extensions when the host doesn't support it!\n"));
1584 return;
1585 }
1586
1587 /* Valid for both Intel and AMD. */
1588 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
1589 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 1;
1590 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled MWAIT Extensions.\n"));
1591 break;
1592
1593 default:
1594 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1595 break;
1596 }
1597
1598 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1599 {
1600 PVMCPU pVCpu = &pVM->aCpus[i];
1601 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1602 }
1603}
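/* Usage sketch (editorial): callers typically enable a feature during VM setup
   after verifying host support, e.g. exposing NX/XD to the guest:
   @code
       CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
       Assert(CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX));
   @endcode
   Every virtual CPU then has CPUM_CHANGED_CPUID set so cached CPUID state is refreshed. */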
1604
1605
1606/**
1607 * Queries a CPUID feature bit.
1608 *
1609 * @returns boolean for feature presence
1610 * @param pVM Pointer to the VM.
1611 * @param enmFeature The feature to query.
1612 */
1613VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1614{
1615 switch (enmFeature)
1616 {
1617 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
1618 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
1619 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
1620 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
1621 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
1622 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
1623 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
1624 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
1625 case CPUMCPUIDFEATURE_PAT: return pVM->cpum.s.GuestFeatures.fPat;
1626 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
1627 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
1628 case CPUMCPUIDFEATURE_MWAIT_EXTS: return pVM->cpum.s.GuestFeatures.fMWaitExtensions;
1629
1630 case CPUMCPUIDFEATURE_INVALID:
1631 case CPUMCPUIDFEATURE_32BIT_HACK:
1632 break;
1633 }
1634 AssertFailed();
1635 return false;
1636}
1637
1638
1639/**
1640 * Clears a CPUID feature bit.
1641 *
1642 * @param pVM Pointer to the VM.
1643 * @param enmFeature The feature to clear.
1644 */
1645VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1646{
1647 PCPUMCPUIDLEAF pLeaf;
1648 switch (enmFeature)
1649 {
1650 case CPUMCPUIDFEATURE_APIC:
1651 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1652 if (pLeaf)
1653 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1654
1655 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1656 if ( pLeaf
1657 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1658 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1659
1660 pVM->cpum.s.GuestFeatures.fApic = 0;
1661 Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
1662 break;
1663
1664 case CPUMCPUIDFEATURE_X2APIC:
1665 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1666 if (pLeaf)
1667 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1668 pVM->cpum.s.GuestFeatures.fX2Apic = 0;
1669 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
1670 break;
1671
1672 case CPUMCPUIDFEATURE_PAE:
1673 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1674 if (pLeaf)
1675 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
1676
1677 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1678 if ( pLeaf
1679 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1680 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1681
1682 pVM->cpum.s.GuestFeatures.fPae = 0;
1683 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
1684 break;
1685
1686 case CPUMCPUIDFEATURE_PAT:
1687 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1688 if (pLeaf)
1689 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
1690
1691 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1692 if ( pLeaf
1693 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1694 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1695
1696 pVM->cpum.s.GuestFeatures.fPat = 0;
1697 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
1698 break;
1699
1700 case CPUMCPUIDFEATURE_LONG_MODE:
1701 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1702 if (pLeaf)
1703 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1704 pVM->cpum.s.GuestFeatures.fLongMode = 0;
1705 break;
1706
1707 case CPUMCPUIDFEATURE_LAHF:
1708 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1709 if (pLeaf)
1710 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1711 pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
1712 break;
1713
1714 case CPUMCPUIDFEATURE_RDTSCP:
1715 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1716 if (pLeaf)
1717 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1718 pVM->cpum.s.GuestFeatures.fRdTscP = 0;
1719 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
1720 break;
1721
1722 case CPUMCPUIDFEATURE_HVP:
1723 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1724 if (pLeaf)
1725 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
1726 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
1727 break;
1728
1729 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1730 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005));
1731 if (pLeaf)
1732 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
1733 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 0;
1734 Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
1735 break;
1736
1737 default:
1738 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1739 break;
1740 }
1741
1742 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1743 {
1744 PVMCPU pVCpu = &pVM->aCpus[i];
1745 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1746 }
1747}
1748
1749
1750/**
1751 * Gets the host CPU vendor.
1752 *
1753 * @returns CPU vendor.
1754 * @param pVM Pointer to the VM.
1755 */
1756VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1757{
1758 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1759}
1760
1761
1762/**
1763 * Gets the CPU vendor.
1764 *
1765 * @returns CPU vendor.
1766 * @param pVM Pointer to the VM.
1767 */
1768VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1769{
1770 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1771}
1772
1773
1774VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1775{
1776 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1777 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1778}
1779
1780
1781VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1782{
1783 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1784 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1785}
1786
1787
1788VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1789{
1790 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1791 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1792}
1793
1794
1795VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1796{
1797 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1798 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1799}
1800
1801
1802VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1803{
1804 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1805 return VINF_SUCCESS; /* No need to recalc. */
1806}
1807
1808
1809VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1810{
1811 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1812 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1813}
1814
1815
1816VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1817{
1818 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1819 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1820 if (iReg == 4 || iReg == 5)
1821 iReg += 2;
1822 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1823 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1824}
1825
1826
1827/**
1828 * Recalculates the hypervisor DRx register values based on current guest
1829 * registers and DBGF breakpoints, updating changed registers depending on the
1830 * context.
1831 *
1832 * This is called whenever a guest DRx register is modified (any context) and
1833 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1834 *
1835 * In raw-mode context this function will reload any (hyper) DRx registers that
1836 * come out with a different value. It may also have to save the host debug
1837 * registers if that hasn't been done already. In this context though, we'll
1838 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1839 * are only important when breakpoints are actually enabled.
1840 *
1841 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1842 * reloaded by the HM code if it changes. Furthermore, we will only use the
1843 * combined register set when the VBox debugger is actually using hardware BPs;
1844 * when it isn't, we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1845 * concern us here).
1846 *
1847 * In ring-3 we won't be loading anything, so we'll calculate the hypervisor values
1848 * all the time.
1849 *
1850 * @returns VINF_SUCCESS.
1851 * @param pVCpu Pointer to the VMCPU.
1852 * @param iGstReg The guest debug register number that was modified.
1853 * UINT8_MAX if not guest register.
1854 * @param fForceHyper Used in HM to force hyper registers because of single
1855 * stepping.
1856 */
1857VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1858{
1859 PVM pVM = pVCpu->CTX_SUFF(pVM);
1860
1861 /*
1862 * Compare the DR7s first.
1863 *
1864 * We only care about the enabled flags. GD is virtualized when we
1865 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1866 * always have the LE and GE bits set, so no need to check and disable
1867 * stuff if they're cleared like we have to for the guest DR7.
1868 */
1869 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1870 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1871 uGstDr7 = 0;
1872 else if (!(uGstDr7 & X86_DR7_LE))
1873 uGstDr7 &= ~X86_DR7_LE_ALL;
1874 else if (!(uGstDr7 & X86_DR7_GE))
1875 uGstDr7 &= ~X86_DR7_GE_ALL;
1876
1877 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1878
1879#ifdef IN_RING0
1880 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1881 fForceHyper = true;
1882#endif
1883 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
1884 {
1885 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1886#ifdef IN_RC
1887 bool const fHmEnabled = false;
1888#elif defined(IN_RING3)
1889 bool const fHmEnabled = HMIsEnabled(pVM);
1890#endif
1891
1892 /*
1893 * Ok, something is enabled. Recalc each of the breakpoints, taking
1894 * the VM debugger ones over the guest ones. In raw-mode context we will
1895 * not allow breakpoints with values inside the hypervisor area.
1896 */
1897 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1898
1899 /* bp 0 */
1900 RTGCUINTREG uNewDr0;
1901 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1902 {
1903 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1904 uNewDr0 = DBGFBpGetDR0(pVM);
1905 }
1906 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1907 {
1908 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1909#ifndef IN_RING0
1910 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1911 uNewDr0 = 0;
1912 else
1913#endif
1914 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1915 }
1916 else
1917 uNewDr0 = 0;
1918
1919 /* bp 1 */
1920 RTGCUINTREG uNewDr1;
1921 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1922 {
1923 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1924 uNewDr1 = DBGFBpGetDR1(pVM);
1925 }
1926 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1927 {
1928 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1929#ifndef IN_RING0
1930 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1931 uNewDr1 = 0;
1932 else
1933#endif
1934 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1935 }
1936 else
1937 uNewDr1 = 0;
1938
1939 /* bp 2 */
1940 RTGCUINTREG uNewDr2;
1941 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1942 {
1943 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1944 uNewDr2 = DBGFBpGetDR2(pVM);
1945 }
1946 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1947 {
1948 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1949#ifndef IN_RING0
1950 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1951 uNewDr2 = 0;
1952 else
1953#endif
1954 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1955 }
1956 else
1957 uNewDr2 = 0;
1958
1959 /* bp 3 */
1960 RTGCUINTREG uNewDr3;
1961 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1962 {
1963 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1964 uNewDr3 = DBGFBpGetDR3(pVM);
1965 }
1966 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1967 {
1968 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1969#ifndef IN_RING0
1970 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1971 uNewDr3 = 0;
1972 else
1973#endif
1974 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1975 }
1976 else
1977 uNewDr3 = 0;
1978
1979 /*
1980 * Apply the updates.
1981 */
1982#ifdef IN_RC
1983 /* Make sure to save host registers first. */
1984 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1985 {
1986 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1987 {
1988 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1989 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1990 }
1991 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1992 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1993 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1994 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1995 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1996
1997 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1998 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1999 ASMSetDR0(uNewDr0);
2000 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
2001 ASMSetDR1(uNewDr1);
2002 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
2003 ASMSetDR2(uNewDr2);
2004 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
2005 ASMSetDR3(uNewDr3);
2006 ASMSetDR6(X86_DR6_INIT_VAL);
2007 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
2008 ASMSetDR7(uNewDr7);
2009 }
2010 else
2011#endif
2012 {
2013 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
2014 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2015 CPUMSetHyperDR3(pVCpu, uNewDr3);
2016 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2017 CPUMSetHyperDR2(pVCpu, uNewDr2);
2018 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2019 CPUMSetHyperDR1(pVCpu, uNewDr1);
2020 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2021 CPUMSetHyperDR0(pVCpu, uNewDr0);
2022 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2023 CPUMSetHyperDR7(pVCpu, uNewDr7);
2024 }
2025 }
2026#ifdef IN_RING0
2027 else if (CPUMIsGuestDebugStateActive(pVCpu))
2028 {
2029 /*
2030 * Reload the register that was modified. Normally this won't happen
2031 * as we won't intercept DRx writes when not having the hyper debug
2032 * state loaded, but in case we do for some reason we'll simply deal
2033 * with it.
2034 */
2035 switch (iGstReg)
2036 {
2037 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
2038 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
2039 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
2040 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
2041 default:
2042 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
2043 }
2044 }
2045#endif
2046 else
2047 {
2048 /*
2049 * No active debug state any more. In raw-mode this means we have to
2050 * make sure DR7 has everything disabled now, if we armed it already.
2051 * In ring-0 we might end up here when just single stepping.
2052 */
2053#if defined(IN_RC) || defined(IN_RING0)
2054 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
2055 {
2056# ifdef IN_RC
2057 ASMSetDR7(X86_DR7_INIT_VAL);
2058# endif
2059 if (pVCpu->cpum.s.Hyper.dr[0])
2060 ASMSetDR0(0);
2061 if (pVCpu->cpum.s.Hyper.dr[1])
2062 ASMSetDR1(0);
2063 if (pVCpu->cpum.s.Hyper.dr[2])
2064 ASMSetDR2(0);
2065 if (pVCpu->cpum.s.Hyper.dr[3])
2066 ASMSetDR3(0);
2067 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
2068 }
2069#endif
2070 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2071
2072 /* Clear all the registers. */
2073 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
2074 pVCpu->cpum.s.Hyper.dr[3] = 0;
2075 pVCpu->cpum.s.Hyper.dr[2] = 0;
2076 pVCpu->cpum.s.Hyper.dr[1] = 0;
2077 pVCpu->cpum.s.Hyper.dr[0] = 0;
2078
2079 }
2080 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2081 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2082 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2083 pVCpu->cpum.s.Hyper.dr[7]));
2084
2085 return VINF_SUCCESS;
2086}
2087
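/*
 * Illustrative sketch, not part of the build: how one breakpoint slot is merged
 * by the code above.  The DBGF (VM debugger) enable bits take precedence; only
 * when DBGF does not own the slot are the guest's DR0 value and DR7 control
 * bits taken over.  The helper name is hypothetical, the macros are the ones
 * used above (the raw-mode hypervisor-area check is omitted for brevity).
 *
 *    static RTGCUINTREG cpumMergeBp0Example(RTGCUINTREG uDbgfDr7, RTGCUINTREG uDbgfDr0,
 *                                           RTGCUINTREG uGstDr7,  RTGCUINTREG uGstDr0,
 *                                           RTGCUINTREG *puNewDr7)
 *    {
 *        if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
 *        {
 *            *puNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
 *            return uDbgfDr0;        // VM debugger breakpoint wins the slot
 *        }
 *        if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
 *        {
 *            *puNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
 *            return uGstDr0;         // otherwise the guest breakpoint is mirrored
 *        }
 *        return 0;                   // slot unused
 *    }
 */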
2088
2089/**
2090 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2091 *
2092 * @returns true if NXE is enabled, otherwise false.
2093 * @param pVCpu Pointer to the VMCPU.
2094 */
2095VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2096{
2097 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2098}
2099
2100
2101/**
2102 * Tests if the guest has the Page Size Extension enabled (PSE).
2103 *
2104 * @returns true if PSE (or PAE, which implies it) is enabled, otherwise false.
2105 * @param pVCpu Pointer to the VMCPU.
2106 */
2107VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2108{
2109 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2110 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2111}
2112
2113
2114/**
2115 * Tests if the guest has paging enabled (PG).
2116 *
2117 * @returns true if paging is enabled, otherwise false.
2118 * @param pVCpu Pointer to the VMCPU.
2119 */
2120VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2121{
2122 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2123}
2124
2125
2126/**
2127 * Tests if the guest has write protection of supervisor pages enabled (CR0.WP).
2128 *
2129 * @returns true if CR0.WP is set, otherwise false.
2130 * @param pVCpu Pointer to the VMCPU.
2131 */
2132VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2133{
2134 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2135}
2136
2137
2138/**
2139 * Tests if the guest is running in real mode or not.
2140 *
2141 * @returns true if in real mode, otherwise false.
2142 * @param pVCpu Pointer to the VMCPU.
2143 */
2144VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2145{
2146 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2147}
2148
2149
2150/**
2151 * Tests if the guest is running in real or virtual 8086 mode.
2152 *
2153 * @returns @c true if it is, @c false if not.
2154 * @param pVCpu Pointer to the VMCPU.
2155 */
2156VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2157{
2158 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2159 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2160}
2161
2162
2163/**
2164 * Tests if the guest is running in protected mode or not.
2165 *
2166 * @returns true if in protected mode, otherwise false.
2167 * @param pVCpu Pointer to the VMCPU.
2168 */
2169VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2170{
2171 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2172}
2173
2174
2175/**
2176 * Tests if the guest is running in paged protected mode or not.
2177 *
2178 * @returns true if in paged protected mode, otherwise false.
2179 * @param pVCpu Pointer to the VMCPU.
2180 */
2181VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2182{
2183 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2184}
2185
2186
2187/**
2188 * Tests if the guest is running in long mode or not.
2189 *
2190 * @returns true if in long mode, otherwise false.
2191 * @param pVCpu Pointer to the VMCPU.
2192 */
2193VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2194{
2195 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2196}
2197
2198
2199/**
2200 * Tests if the guest is running in PAE mode or not.
2201 *
2202 * @returns true if in PAE mode, otherwise false.
2203 * @param pVCpu Pointer to the VMCPU.
2204 */
2205VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2206{
2207 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We use EFER.LMA rather
2208 than EFER.LME, as it reflects whether the CPU has actually entered paging with EFER.LME set. */
2209 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2210 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2211 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2212}
2213
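/*
 * Illustrative sketch, not part of the build: combining the predicates above to
 * classify the guest's paging mode, e.g. for logging.  The helper name and the
 * returned strings are hypothetical.
 *
 *    static const char *cpumGetPagingModeNameExample(PVMCPU pVCpu)
 *    {
 *        if (!CPUMIsGuestPagingEnabled(pVCpu))
 *            return "unpaged";                    // real mode, or protected mode with CR0.PG clear
 *        if (CPUMIsGuestInLongMode(pVCpu))
 *            return "long mode paging";           // EFER.LMA set
 *        if (CPUMIsGuestInPAEMode(pVCpu))
 *            return "32-bit PAE paging";          // CR4.PAE set, EFER.LMA clear
 *        return "32-bit legacy paging";
 *    }
 */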
2214
2215/**
2216 * Tests if the guest is running in 64-bit mode or not.
2217 *
2218 * @returns true if in 64-bit protected mode, otherwise false.
2219 * @param pVCpu The current virtual CPU.
2220 */
2221VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2222{
2223 if (!CPUMIsGuestInLongMode(pVCpu))
2224 return false;
2225 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2226 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2227}
2228
2229
2230/**
2231 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2232 * registers.
2233 *
2234 * @returns true if in 64-bit protected mode, otherwise false.
2235 * @param pCtx Pointer to the current guest CPU context.
2236 */
2237VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2238{
2239 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2240}
2241
2242#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2243
2244/**
2245 * Tests whether we've entered raw-mode.
2246 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2247 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2248 * @param pVCpu The current virtual CPU.
2249 */
2250VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2251{
2252 return pVCpu->cpum.s.fRawEntered;
2253}
2254
2255/**
2256 * Transforms the guest CPU state to raw-ring mode.
2257 *
2258 * This function will change the cs and ss selector registers with RPL=0 to RPL=1.
2259 *
2260 * @returns VBox status. (recompiler failure)
2261 * @param pVCpu Pointer to the VMCPU.
2262 * @see @ref pg_raw
2263 */
2264VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2265{
2266 PVM pVM = pVCpu->CTX_SUFF(pVM);
2267
2268 Assert(!pVCpu->cpum.s.fRawEntered);
2269 Assert(!pVCpu->cpum.s.fRemEntered);
2270 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2271
2272 /*
2273 * Are we in Ring-0?
2274 */
2275 if ( pCtx->ss.Sel
2276 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2277 && !pCtx->eflags.Bits.u1VM)
2278 {
2279 /*
2280 * Enter execution mode.
2281 */
2282 PATMRawEnter(pVM, pCtx);
2283
2284 /*
2285 * Set CPL to Ring-1.
2286 */
2287 pCtx->ss.Sel |= 1;
2288 if ( pCtx->cs.Sel
2289 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2290 pCtx->cs.Sel |= 1;
2291 }
2292 else
2293 {
2294# ifdef VBOX_WITH_RAW_RING1
2295 if ( EMIsRawRing1Enabled(pVM)
2296 && !pCtx->eflags.Bits.u1VM
2297 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2298 {
2299 /* Set CPL to Ring-2. */
2300 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2301 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2302 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2303 }
2304# else
2305 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2306 ("ring-1 code not supported\n"));
2307# endif
2308 /*
2309 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2310 */
2311 PATMRawEnter(pVM, pCtx);
2312 }
2313
2314 /*
2315 * Assert sanity.
2316 */
2317 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2318 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2319 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2320 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
2321
2322 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2323
2324 pVCpu->cpum.s.fRawEntered = true;
2325 return VINF_SUCCESS;
2326}
2327
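/*
 * Illustrative sketch, not part of the build: the transformation done by
 * CPUMRawEnter is just an RPL adjustment of the selector values, undone again
 * by CPUMRawLeave below.  The function name is hypothetical.
 *
 *    static void cpumRawRplExample(void)
 *    {
 *        RTSEL SelCs = 0x0008;                   // flat guest ring-0 selector, RPL=0
 *        Assert((SelCs & X86_SEL_RPL) == 0);
 *        SelCs |= 1;                             // CPUMRawEnter: execute it at RPL=1
 *        Assert(SelCs == 0x0009);
 *        SelCs &= ~X86_SEL_RPL;                  // CPUMRawLeave: back to RPL=0
 *        Assert(SelCs == 0x0008);
 *    }
 */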
2328
2329/**
2330 * Transforms the guest CPU state from raw-ring mode to correct values.
2331 *
2332 * This function will change any selector registers with RPL=1 back to RPL=0.
2333 *
2334 * @returns Adjusted rc.
2335 * @param pVCpu Pointer to the VMCPU.
2336 * @param rc Raw mode return code
2337 * @see @ref pg_raw
2338 */
2339VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2340{
2341 PVM pVM = pVCpu->CTX_SUFF(pVM);
2342
2343 /*
2344 * Don't leave if we've already left (in RC).
2345 */
2346 Assert(!pVCpu->cpum.s.fRemEntered);
2347 if (!pVCpu->cpum.s.fRawEntered)
2348 return rc;
2349 pVCpu->cpum.s.fRawEntered = false;
2350
2351 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2352 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2353 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2354 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2355
2356 /*
2357 * Are we executing in raw ring-1?
2358 */
2359 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2360 && !pCtx->eflags.Bits.u1VM)
2361 {
2362 /*
2363 * Leave execution mode.
2364 */
2365 PATMRawLeave(pVM, pCtx, rc);
2366 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2367 /** @todo See what happens if we remove this. */
2368 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2369 pCtx->ds.Sel &= ~X86_SEL_RPL;
2370 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2371 pCtx->es.Sel &= ~X86_SEL_RPL;
2372 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2373 pCtx->fs.Sel &= ~X86_SEL_RPL;
2374 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2375 pCtx->gs.Sel &= ~X86_SEL_RPL;
2376
2377 /*
2378 * Ring-1 selector => Ring-0.
2379 */
2380 pCtx->ss.Sel &= ~X86_SEL_RPL;
2381 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2382 pCtx->cs.Sel &= ~X86_SEL_RPL;
2383 }
2384 else
2385 {
2386 /*
2387 * PATM is taking care of the IOPL and IF flags for us.
2388 */
2389 PATMRawLeave(pVM, pCtx, rc);
2390 if (!pCtx->eflags.Bits.u1VM)
2391 {
2392# ifdef VBOX_WITH_RAW_RING1
2393 if ( EMIsRawRing1Enabled(pVM)
2394 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2395 {
2396 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2397 /** @todo See what happens if we remove this. */
2398 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2399 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2400 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2401 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2402 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2403 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2404 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2405 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2406
2407 /*
2408 * Ring-2 selector => Ring-1.
2409 */
2410 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2411 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2412 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2413 }
2414 else
2415 {
2416# endif
2417 /** @todo See what happens if we remove this. */
2418 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2419 pCtx->ds.Sel &= ~X86_SEL_RPL;
2420 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2421 pCtx->es.Sel &= ~X86_SEL_RPL;
2422 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2423 pCtx->fs.Sel &= ~X86_SEL_RPL;
2424 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2425 pCtx->gs.Sel &= ~X86_SEL_RPL;
2426# ifdef VBOX_WITH_RAW_RING1
2427 }
2428# endif
2429 }
2430 }
2431
2432 return rc;
2433}
2434
2435#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2436
2437/**
2438 * Updates the EFLAGS while we're in raw-mode.
2439 *
2440 * @param pVCpu Pointer to the VMCPU.
2441 * @param fEfl The new EFLAGS value.
2442 */
2443VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2444{
2445#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2446 if (pVCpu->cpum.s.fRawEntered)
2447 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2448 else
2449#endif
2450 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2451}
2452
2453
2454/**
2455 * Gets the EFLAGS while we're in raw-mode.
2456 *
2457 * @returns The eflags.
2458 * @param pVCpu Pointer to the current virtual CPU.
2459 */
2460VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2461{
2462#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2463 if (pVCpu->cpum.s.fRawEntered)
2464 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2465#endif
2466 return pVCpu->cpum.s.Guest.eflags.u32;
2467}
2468
2469
2470/**
2471 * Sets the specified changed flags (CPUM_CHANGED_*).
2472 *
2473 * @param pVCpu Pointer to the current virtual CPU.
 * @param fChangedFlags The changed flags (CPUM_CHANGED_*) to set.
2474 */
2475VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2476{
2477 pVCpu->cpum.s.fChanged |= fChangedFlags;
2478}
2479
2480
2481/**
2482 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
2483 * @returns true if supported.
2484 * @returns false if not supported.
2485 * @param pVM Pointer to the VM.
2486 */
2487VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2488{
2489 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2490}
2491
2492
2493/**
2494 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2495 * @returns true if used.
2496 * @returns false if not used.
2497 * @param pVM Pointer to the VM.
2498 */
2499VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2500{
2501 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2502}
2503
2504
2505/**
2506 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2507 * @returns true if used.
2508 * @returns false if not used.
2509 * @param pVM Pointer to the VM.
2510 */
2511VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2512{
2513 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2514}
2515
2516#ifdef IN_RC
2517
2518/**
2519 * Lazily sync in the FPU/XMM state.
2520 *
2521 * @returns VBox status code.
2522 * @param pVCpu Pointer to the VMCPU.
2523 */
2524VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2525{
2526 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2527}
2528
2529#endif /* IN_RC */
2530
2531/**
2532 * Checks if we activated the FPU/XMM state of the guest OS.
2533 * @returns true if we did.
2534 * @returns false if not.
2535 * @param pVCpu Pointer to the VMCPU.
2536 */
2537VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2538{
2539 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
2540}
2541
2542
2543/**
2544 * Deactivate the FPU/XMM state of the guest OS.
2545 * @param pVCpu Pointer to the VMCPU.
2546 *
2547 * @todo r=bird: Why is this needed? Looks like a workaround for mishandled
2548 * FPU state management.
2549 */
2550VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2551{
2552 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU));
2553 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2554}
2555
2556
2557/**
2558 * Checks if the guest debug state is active.
2559 *
2560 * @returns boolean
2561 * @param pVCpu Pointer to the VMCPU.
2562 */
2563VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2564{
2565 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2566}
2567
2568
2569/**
2570 * Checks if the guest debug state is to be made active during the world-switch
2571 * (currently only used for the 32->64 switcher case).
2572 *
2573 * @returns boolean
2574 * @param pVCpu Pointer to the VMCPU.
2575 */
2576VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2577{
2578 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2579}
2580
2581
2582/**
2583 * Checks if the hyper debug state is active.
2584 *
2585 * @returns boolean
2586 * @param pVCpu Pointer to the VMCPU.
2587 */
2588VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2589{
2590 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2591}
2592
2593
2594/**
2595 * Checks if the hyper debug state is to be made active during the world-switch
2596 * (currently only used for the 32->64 switcher case).
2597 *
2598 * @returns boolean
2599 * @param pVCpu Pointer to the VMCPU.
2600 */
2601VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2602{
2603 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2604}
2605
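/*
 * Illustrative sketch, not part of the build: a caller that needs to know
 * whether the guest debug registers will be live on the CPU once the world
 * switch completes can combine the 'active' and 'pending' queries above.
 * The helper name is hypothetical.
 *
 *    static bool cpumWillGuestDrxBeLiveExample(PVMCPU pVCpu)
 *    {
 *        return CPUMIsGuestDebugStateActive(pVCpu)           // already loaded on the CPU
 *            || CPUMIsGuestDebugStateActivePending(pVCpu);   // loaded by the 32->64 switcher
 *    }
 */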
2606
2607/**
2608 * Mark the guest's debug state as inactive.
2609 *
2610 * @param pVCpu Pointer to the VMCPU.
2612 * @todo This API doesn't make sense any more.
2613 */
2614VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2615{
2616 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2617}
2618
2619
2620/**
2621 * Get the current privilege level of the guest.
2622 *
2623 * @returns CPL
2624 * @param pVCpu Pointer to the current virtual CPU.
2625 */
2626VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2627{
2628 /*
2629 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2630 *
2631 * Note! We used to check CS.DPL here, assuming it was always equal to
2632 * CPL even if a conforming segment was loaded. But this turned out to
2633 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2634 * during install after a far call to ring 2 with VT-x. Then on newer
2635 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2636 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2637 *
2638 * So, forget CS.DPL, always use SS.DPL.
2639 *
2640 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2641 * isn't necessarily equal if the segment is conforming.
2642 * See section 4.11.1 in the AMD manual.
2643 *
2644 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2645 * right after real->prot mode switch and when in V8086 mode? That
2646 * section says the RPL specified in a direct transfer (call, jmp,
2647 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2648 * it would be impossible for an exception handler or the iret
2649 * instruction to figure out whether SS:ESP are part of the frame
2650 * or not. VBox or qemu bug must've led to this misconception.
2651 *
2652 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2653 * selector into SS with an RPL other than the CPL when CPL != 3 and
2654 * we're in 64-bit mode. The intel dev box doesn't allow this, only
2655 * RPL = CPL. Weird.
2656 */
2657 uint32_t uCpl;
2658 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2659 {
2660 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2661 {
2662 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2663 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2664 else
2665 {
2666 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2667#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2668# ifdef VBOX_WITH_RAW_RING1
2669 if (pVCpu->cpum.s.fRawEntered)
2670 {
2671 if ( uCpl == 2
2672 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2673 uCpl = 1;
2674 else if (uCpl == 1)
2675 uCpl = 0;
2676 }
2677 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2678# else
2679 if (uCpl == 1)
2680 uCpl = 0;
2681# endif
2682#endif
2683 }
2684 }
2685 else
2686 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2687 }
2688 else
2689 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2690 return uCpl;
2691}
2692
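/*
 * Illustrative sketch, not part of the build: typical use of CPUMGetGuestCPL
 * when only the user/supervisor distinction matters.  The helper name is
 * hypothetical.
 *
 *    static bool cpumIsGuestInUserModeExample(PVMCPU pVCpu)
 *    {
 *        return CPUMGetGuestCPL(pVCpu) == 3;     // ring-3, i.e. unprivileged guest code
 *    }
 */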
2693
2694/**
2695 * Gets the current guest CPU mode.
2696 *
2697 * If paging mode is what you need, check out PGMGetGuestMode().
2698 *
2699 * @returns The CPU mode.
2700 * @param pVCpu Pointer to the VMCPU.
2701 */
2702VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2703{
2704 CPUMMODE enmMode;
2705 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2706 enmMode = CPUMMODE_REAL;
2707 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2708 enmMode = CPUMMODE_PROTECTED;
2709 else
2710 enmMode = CPUMMODE_LONG;
2711
2712 return enmMode;
2713}
2714
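/*
 * Illustrative sketch, not part of the build: mapping the CPUMMODE value to a
 * name, e.g. for a log statement.  The helper name and the strings are
 * hypothetical.
 *
 *    static const char *cpumGetGuestModeNameExample(PVMCPU pVCpu)
 *    {
 *        switch (CPUMGetGuestMode(pVCpu))
 *        {
 *            case CPUMMODE_REAL:       return "real";
 *            case CPUMMODE_PROTECTED:  return "protected";
 *            case CPUMMODE_LONG:       return "long";
 *            default:                  return "invalid";
 *        }
 *    }
 */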
2715
2716/**
2717 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
2718 *
2719 * @returns 16, 32 or 64.
2720 * @param pVCpu The current virtual CPU.
2721 */
2722VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2723{
2724 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2725 return 16;
2726
2727 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2728 {
2729 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2730 return 16;
2731 }
2732
2733 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2734 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2735 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2736 return 64;
2737
2738 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2739 return 32;
2740
2741 return 16;
2742}
2743
2744
2745VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2746{
2747 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2748 return DISCPUMODE_16BIT;
2749
2750 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2751 {
2752 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2753 return DISCPUMODE_16BIT;
2754 }
2755
2756 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2757 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2758 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2759 return DISCPUMODE_64BIT;
2760
2761 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2762 return DISCPUMODE_32BIT;
2763
2764 return DISCPUMODE_16BIT;
2765}
2766
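/*
 * Illustrative sketch, not part of the build: CPUMGetGuestCodeBits and
 * CPUMGetGuestDisMode walk the same CR0.PE / EFLAGS.VM / CS.L / CS.D decision
 * tree, so the results correspond as follows.  The helper name is hypothetical.
 *
 *    static DISCPUMODE cpumCodeBitsToDisModeExample(uint32_t cBits)
 *    {
 *        switch (cBits)
 *        {
 *            case 64: return DISCPUMODE_64BIT;
 *            case 32: return DISCPUMODE_32BIT;
 *            default: return DISCPUMODE_16BIT;   // 16 is the only other value returned
 *        }
 *    }
 */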