VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 53626

Last change on this file since 53626 was 53467, checked in by vboxsync, 10 years ago

VMM: Removed VBOX_WITH_NEW_MSR_CODE and the code marked ifndef VBOX_WITH_NEW_MSR_CODE.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 79.7 KB
1/* $Id: CPUMAllRegs.cpp 53467 2014-12-05 16:10:20Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2014 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52
53/*******************************************************************************
54* Defined Constants And Macros *
55*******************************************************************************/
56/**
57 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
58 *
59 * @returns Pointer to the Virtual CPU.
60 * @param a_pGuestCtx Pointer to the guest context.
61 */
62#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
63
64/**
65 * Lazily loads the hidden parts of a selector register when using raw-mode.
66 */
67#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
68# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
69 do \
70 { \
71 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
72 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
73 } while (0)
74#else
75# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
76 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
77#endif
78
79
80
81#ifdef VBOX_WITH_RAW_MODE_NOT_R0
82
83/**
84 * Does the lazy hidden selector register loading.
85 *
86 * @param pVCpu The current Virtual CPU.
87 * @param pSReg The selector register to lazily load hidden parts of.
88 */
89static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
90{
91 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
92 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
93 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
94
95 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
96 {
97 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
98 pSReg->Attr.u = 0;
99 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
100 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
101 pSReg->Attr.n.u2Dpl = 3;
102 pSReg->Attr.n.u1Present = 1;
103 pSReg->u32Limit = 0x0000ffff;
104 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
105 pSReg->ValidSel = pSReg->Sel;
106 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
107 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
108 }
109 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
110 {
111 /* Real mode - leave the limit and flags alone here, at least for now. */
112 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
113 pSReg->ValidSel = pSReg->Sel;
114 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
115 }
116 else
117 {
118 /* Protected mode - get it from the selector descriptor tables. */
119 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
120 {
121 Assert(!CPUMIsGuestInLongMode(pVCpu));
122 pSReg->Sel = 0;
123 pSReg->u64Base = 0;
124 pSReg->u32Limit = 0;
125 pSReg->Attr.u = 0;
126 pSReg->ValidSel = 0;
127 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
128 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
129 }
130 else
131 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
132 }
133}
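/*
 * Worked example for the real/V8086 mode branches above (illustrative values
 * only): a selector value of 0x1234 gives u64Base = 0x1234 << 4 = 0x12340;
 * in V8086 mode the limit is additionally forced to 0x0000ffff and the
 * attributes to an accessed, present, DPL 3 code/data segment.
 */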
134
135
136/**
137 * Makes sure the hidden CS and SS selector registers are valid, loading them if
138 * necessary.
139 *
140 * @param pVCpu The current virtual CPU.
141 */
142VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
143{
144 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
145 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
146}
147
148
149/**
150 * Loads the hidden parts of a selector register.
151 *
152 * @param pVCpu The current virtual CPU.
 * @param pSReg The selector register to lazily load the hidden parts of.
153 */
154VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
155{
156 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
157}
158
159#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
160
161
162/**
163 * Obsolete.
164 *
165 * We don't support nested hypervisor context interrupts or traps. Life is much
166 * simpler when we don't. It's also slightly faster at times.
167 *
168 * @param pVCpu Pointer to the VMCPU.
169 */
170VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
171{
172 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
173}
174
175
176/**
177 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
178 *
179 * @param pVCpu Pointer to the VMCPU.
180 */
181VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
182{
183 return &pVCpu->cpum.s.Hyper;
184}
185
186
187VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
188{
189 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
190 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
191}
192
193
194VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
195{
196 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
197 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
198}
199
200
201VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
202{
203 pVCpu->cpum.s.Hyper.cr3 = cr3;
204
205#ifdef IN_RC
206 /* Update the current CR3. */
207 ASMSetCR3(cr3);
208#endif
209}
210
211VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
212{
213 return pVCpu->cpum.s.Hyper.cr3;
214}
215
216
217VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
218{
219 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
220}
221
222
223VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
224{
225 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
226}
227
228
229VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
230{
231 pVCpu->cpum.s.Hyper.es.Sel = SelES;
232}
233
234
235VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
236{
237 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
238}
239
240
241VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
242{
243 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
244}
245
246
247VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
248{
249 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
250}
251
252
253VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
254{
255 pVCpu->cpum.s.Hyper.esp = u32ESP;
256}
257
258
259VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
260{
261 pVCpu->cpum.s.Hyper.edx = u32EDX;
262}
263
264
265VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
266{
267 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
268 return VINF_SUCCESS;
269}
270
271
272VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
273{
274 pVCpu->cpum.s.Hyper.eip = u32EIP;
275}
276
277
278/**
279 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
280 * EFLAGS and EIP prior to resuming guest execution.
281 *
282 * All general registers not given as a parameter will be set to 0. The EFLAGS
283 * register will be set to sane values for C/C++ code execution with interrupts
284 * disabled and IOPL 0.
285 *
286 * @param pVCpu The current virtual CPU.
287 * @param u32EIP The EIP value.
288 * @param u32ESP The ESP value.
289 * @param u32EAX The EAX value.
290 * @param u32EDX The EDX value.
291 */
292VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
293{
294 pVCpu->cpum.s.Hyper.eip = u32EIP;
295 pVCpu->cpum.s.Hyper.esp = u32ESP;
296 pVCpu->cpum.s.Hyper.eax = u32EAX;
297 pVCpu->cpum.s.Hyper.edx = u32EDX;
298 pVCpu->cpum.s.Hyper.ecx = 0;
299 pVCpu->cpum.s.Hyper.ebx = 0;
300 pVCpu->cpum.s.Hyper.ebp = 0;
301 pVCpu->cpum.s.Hyper.esi = 0;
302 pVCpu->cpum.s.Hyper.edi = 0;
303 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
304}
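/*
 * Minimal usage sketch for CPUMSetHyperState (hypothetical raw-mode caller;
 * the variable names below are illustrative assumptions, not taken from
 * VMMR3RawRunGC itself):
 *
 * @code
 *     // Point EIP at the guest-context entry stub, hand it a stack and two
 *     // arguments in EAX/EDX; the remaining GPRs are zeroed and EFLAGS is
 *     // reset to the reserved-bit-one value (interrupts off, IOPL 0).
 *     CPUMSetHyperState(pVCpu, uEipEntry, uEspTop, uArg0, uArg1);
 *     CPUMSetHyperCS(pVCpu, SelCS);
 *     CPUMSetHyperSS(pVCpu, SelSS);
 * @endcode
 */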
305
306
307VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
308{
309 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
310}
311
312
313VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
314{
315 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
316}
317
318
319/** @def MAYBE_LOAD_DRx
320 * Macro for updating DRx values in raw-mode and ring-0 contexts.
321 */
322#ifdef IN_RING0
323# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
324# ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
325# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
326 do { \
327 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
328 a_fnLoad(a_uValue); \
329 else \
330 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
331 } while (0)
332# else
333# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
334 do { \
335 /** @todo we're not loading the correct guest value here! */ \
336 a_fnLoad(a_uValue); \
337 } while (0)
338# endif
339# else
340# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
341 do { \
342 a_fnLoad(a_uValue); \
343 } while (0)
344# endif
345
346#elif defined(IN_RC)
347# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
348 do { \
349 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
350 { a_fnLoad(a_uValue); } \
351 } while (0)
352
353#else
354# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
355#endif
356
357VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
358{
359 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
360 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
361}
362
363
364VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
365{
366 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
367 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
368}
369
370
371VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
372{
373 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
374 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
375}
376
377
378VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
379{
380 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
381 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
382}
383
384
385VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
386{
387 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
388}
389
390
391VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
392{
393 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
394#ifdef IN_RC
395 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
396#endif
397}
398
399
400VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
401{
402 return pVCpu->cpum.s.Hyper.cs.Sel;
403}
404
405
406VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
407{
408 return pVCpu->cpum.s.Hyper.ds.Sel;
409}
410
411
412VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
413{
414 return pVCpu->cpum.s.Hyper.es.Sel;
415}
416
417
418VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
419{
420 return pVCpu->cpum.s.Hyper.fs.Sel;
421}
422
423
424VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
425{
426 return pVCpu->cpum.s.Hyper.gs.Sel;
427}
428
429
430VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
431{
432 return pVCpu->cpum.s.Hyper.ss.Sel;
433}
434
435
436VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
437{
438 return pVCpu->cpum.s.Hyper.eax;
439}
440
441
442VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
443{
444 return pVCpu->cpum.s.Hyper.ebx;
445}
446
447
448VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
449{
450 return pVCpu->cpum.s.Hyper.ecx;
451}
452
453
454VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
455{
456 return pVCpu->cpum.s.Hyper.edx;
457}
458
459
460VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
461{
462 return pVCpu->cpum.s.Hyper.esi;
463}
464
465
466VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
467{
468 return pVCpu->cpum.s.Hyper.edi;
469}
470
471
472VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
473{
474 return pVCpu->cpum.s.Hyper.ebp;
475}
476
477
478VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
479{
480 return pVCpu->cpum.s.Hyper.esp;
481}
482
483
484VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
485{
486 return pVCpu->cpum.s.Hyper.eflags.u32;
487}
488
489
490VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
491{
492 return pVCpu->cpum.s.Hyper.eip;
493}
494
495
496VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
497{
498 return pVCpu->cpum.s.Hyper.rip;
499}
500
501
502VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
503{
504 if (pcbLimit)
505 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
506 return pVCpu->cpum.s.Hyper.idtr.pIdt;
507}
508
509
510VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
511{
512 if (pcbLimit)
513 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
514 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
515}
516
517
518VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
519{
520 return pVCpu->cpum.s.Hyper.ldtr.Sel;
521}
522
523
524VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
525{
526 return pVCpu->cpum.s.Hyper.dr[0];
527}
528
529
530VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
531{
532 return pVCpu->cpum.s.Hyper.dr[1];
533}
534
535
536VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
537{
538 return pVCpu->cpum.s.Hyper.dr[2];
539}
540
541
542VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
543{
544 return pVCpu->cpum.s.Hyper.dr[3];
545}
546
547
548VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
549{
550 return pVCpu->cpum.s.Hyper.dr[6];
551}
552
553
554VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
555{
556 return pVCpu->cpum.s.Hyper.dr[7];
557}
558
559
560/**
561 * Gets the pointer to the internal CPUMCTXCORE structure.
562 * This is only for reading in order to save a few calls.
563 *
564 * @param pVCpu Handle to the virtual cpu.
565 */
566VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
567{
568 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
569}
570
571
572/**
573 * Queries the pointer to the internal CPUMCTX structure.
574 *
575 * @returns The CPUMCTX pointer.
576 * @param pVCpu Handle to the virtual cpu.
577 */
578VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
579{
580 return &pVCpu->cpum.s.Guest;
581}
582
583VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
584{
585#ifdef VBOX_WITH_IEM
586# ifdef VBOX_WITH_RAW_MODE_NOT_R0
587 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
588 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
589# endif
590#endif
591 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
592 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
593 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
594 return VINF_SUCCESS; /* formality, consider it void. */
595}
596
597VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
598{
599#ifdef VBOX_WITH_IEM
600# ifdef VBOX_WITH_RAW_MODE_NOT_R0
601 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
602 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
603# endif
604#endif
605 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
606 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
607 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
608 return VINF_SUCCESS; /* formality, consider it void. */
609}
610
611VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
612{
613#ifdef VBOX_WITH_IEM
614# ifdef VBOX_WITH_RAW_MODE_NOT_R0
615 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
616 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
617# endif
618#endif
619 pVCpu->cpum.s.Guest.tr.Sel = tr;
620 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
621 return VINF_SUCCESS; /* formality, consider it void. */
622}
623
624VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
625{
626#ifdef VBOX_WITH_IEM
627# ifdef VBOX_WITH_RAW_MODE_NOT_R0
628 if ( ( ldtr != 0
629 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
630 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
631 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
632# endif
633#endif
634 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
635 /* The caller will set more hidden bits if it has them. */
636 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
637 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
638 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
639 return VINF_SUCCESS; /* formality, consider it void. */
640}
641
642
643/**
644 * Set the guest CR0.
645 *
646 * When called in GC, the hyper CR0 may be updated if that is
647 * required. The caller only has to take special action if AM,
648 * WP, PG or PE changes.
649 *
650 * @returns VINF_SUCCESS (consider it void).
651 * @param pVCpu Handle to the virtual cpu.
652 * @param cr0 The new CR0 value.
653 */
654VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
655{
656#ifdef IN_RC
657 /*
658 * Check if we need to change hypervisor CR0 because
659 * of math stuff.
660 */
661 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
662 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
663 {
664 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
665 {
666 /*
667 * We haven't saved the host FPU state yet, so TS and MT are both set
668 * and EM should be reflecting the guest EM (it always does this).
669 */
670 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
671 {
672 uint32_t HyperCR0 = ASMGetCR0();
673 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
674 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
675 HyperCR0 &= ~X86_CR0_EM;
676 HyperCR0 |= cr0 & X86_CR0_EM;
677 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
678 ASMSetCR0(HyperCR0);
679 }
680# ifdef VBOX_STRICT
681 else
682 {
683 uint32_t HyperCR0 = ASMGetCR0();
684 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
685 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
686 }
687# endif
688 }
689 else
690 {
691 /*
692 * Already saved the state, so we're just mirroring
693 * the guest flags.
694 */
695 uint32_t HyperCR0 = ASMGetCR0();
696 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
697 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
698 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
699 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
700 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
701 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
702 ASMSetCR0(HyperCR0);
703 }
704 }
705#endif /* IN_RC */
706
707 /*
708 * Check for changes causing TLB flushes (for REM).
709 * The caller is responsible for calling PGM when appropriate.
710 */
711 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
712 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
713 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
714 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
715
716 /*
717 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
718 */
719 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
720 PGMCr0WpEnabled(pVCpu);
721
722 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
723 return VINF_SUCCESS;
724}
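/*
 * Caller-side sketch for CPUMSetGuestCR0: the function only records the
 * change, so an emulation-style caller still has to notify PGM itself when
 * the paging related bits change (PGMChangeMode is the generic mode-change
 * entry point; whether a given caller uses it directly is an assumption here):
 *
 * @code
 *     uint64_t const uOldCr0 = CPUMGetGuestCR0(pVCpu);
 *     CPUMSetGuestCR0(pVCpu, uNewCr0);
 *     if ((uOldCr0 ^ uNewCr0) & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
 *         rc = PGMChangeMode(pVCpu, uNewCr0, CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
 * @endcode
 */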
725
726
727VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
728{
729 pVCpu->cpum.s.Guest.cr2 = cr2;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
735{
736 pVCpu->cpum.s.Guest.cr3 = cr3;
737 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
738 return VINF_SUCCESS;
739}
740
741
742VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
743{
744 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
745 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
746 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
747 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
748 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
749 cr4 &= ~X86_CR4_OSFSXR;
750 pVCpu->cpum.s.Guest.cr4 = cr4;
751 return VINF_SUCCESS;
752}
753
754
755VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
756{
757 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
758 return VINF_SUCCESS;
759}
760
761
762VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
763{
764 pVCpu->cpum.s.Guest.eip = eip;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
770{
771 pVCpu->cpum.s.Guest.eax = eax;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
777{
778 pVCpu->cpum.s.Guest.ebx = ebx;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
784{
785 pVCpu->cpum.s.Guest.ecx = ecx;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
791{
792 pVCpu->cpum.s.Guest.edx = edx;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
798{
799 pVCpu->cpum.s.Guest.esp = esp;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
805{
806 pVCpu->cpum.s.Guest.ebp = ebp;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
812{
813 pVCpu->cpum.s.Guest.esi = esi;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
819{
820 pVCpu->cpum.s.Guest.edi = edi;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
826{
827 pVCpu->cpum.s.Guest.ss.Sel = ss;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
833{
834 pVCpu->cpum.s.Guest.cs.Sel = cs;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
840{
841 pVCpu->cpum.s.Guest.ds.Sel = ds;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
847{
848 pVCpu->cpum.s.Guest.es.Sel = es;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
854{
855 pVCpu->cpum.s.Guest.fs.Sel = fs;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
861{
862 pVCpu->cpum.s.Guest.gs.Sel = gs;
863 return VINF_SUCCESS;
864}
865
866
867VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
868{
869 pVCpu->cpum.s.Guest.msrEFER = val;
870}
871
872
873VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
874{
875 if (pcbLimit)
876 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
877 return pVCpu->cpum.s.Guest.idtr.pIdt;
878}
879
880
881VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
882{
883 if (pHidden)
884 *pHidden = pVCpu->cpum.s.Guest.tr;
885 return pVCpu->cpum.s.Guest.tr.Sel;
886}
887
888
889VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
890{
891 return pVCpu->cpum.s.Guest.cs.Sel;
892}
893
894
895VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
896{
897 return pVCpu->cpum.s.Guest.ds.Sel;
898}
899
900
901VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
902{
903 return pVCpu->cpum.s.Guest.es.Sel;
904}
905
906
907VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
908{
909 return pVCpu->cpum.s.Guest.fs.Sel;
910}
911
912
913VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
914{
915 return pVCpu->cpum.s.Guest.gs.Sel;
916}
917
918
919VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
920{
921 return pVCpu->cpum.s.Guest.ss.Sel;
922}
923
924
925VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
926{
927 return pVCpu->cpum.s.Guest.ldtr.Sel;
928}
929
930
931VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
932{
933 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
934 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
935 return pVCpu->cpum.s.Guest.ldtr.Sel;
936}
937
938
939VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
940{
941 return pVCpu->cpum.s.Guest.cr0;
942}
943
944
945VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
946{
947 return pVCpu->cpum.s.Guest.cr2;
948}
949
950
951VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
952{
953 return pVCpu->cpum.s.Guest.cr3;
954}
955
956
957VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
958{
959 return pVCpu->cpum.s.Guest.cr4;
960}
961
962
963VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
964{
965 uint64_t u64;
966 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
967 if (RT_FAILURE(rc))
968 u64 = 0;
969 return u64;
970}
971
972
973VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
974{
975 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
976}
977
978
979VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
980{
981 return pVCpu->cpum.s.Guest.eip;
982}
983
984
985VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
986{
987 return pVCpu->cpum.s.Guest.rip;
988}
989
990
991VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
992{
993 return pVCpu->cpum.s.Guest.eax;
994}
995
996
997VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
998{
999 return pVCpu->cpum.s.Guest.ebx;
1000}
1001
1002
1003VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1004{
1005 return pVCpu->cpum.s.Guest.ecx;
1006}
1007
1008
1009VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1010{
1011 return pVCpu->cpum.s.Guest.edx;
1012}
1013
1014
1015VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1016{
1017 return pVCpu->cpum.s.Guest.esi;
1018}
1019
1020
1021VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1022{
1023 return pVCpu->cpum.s.Guest.edi;
1024}
1025
1026
1027VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1028{
1029 return pVCpu->cpum.s.Guest.esp;
1030}
1031
1032
1033VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1034{
1035 return pVCpu->cpum.s.Guest.ebp;
1036}
1037
1038
1039VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1040{
1041 return pVCpu->cpum.s.Guest.eflags.u32;
1042}
1043
1044
1045VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1046{
1047 switch (iReg)
1048 {
1049 case DISCREG_CR0:
1050 *pValue = pVCpu->cpum.s.Guest.cr0;
1051 break;
1052
1053 case DISCREG_CR2:
1054 *pValue = pVCpu->cpum.s.Guest.cr2;
1055 break;
1056
1057 case DISCREG_CR3:
1058 *pValue = pVCpu->cpum.s.Guest.cr3;
1059 break;
1060
1061 case DISCREG_CR4:
1062 *pValue = pVCpu->cpum.s.Guest.cr4;
1063 break;
1064
1065 case DISCREG_CR8:
1066 {
1067 uint8_t u8Tpr;
1068 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1069 if (RT_FAILURE(rc))
1070 {
1071 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1072 *pValue = 0;
1073 return rc;
1074 }
1075 *pValue = u8Tpr >> 4; /* Bits 7-4 of the TPR contain the task priority that goes into CR8; bits 3-0 are not represented in CR8. */
1076 break;
1077 }
1078
1079 default:
1080 return VERR_INVALID_PARAMETER;
1081 }
1082 return VINF_SUCCESS;
1083}
1084
1085
1086VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1087{
1088 return pVCpu->cpum.s.Guest.dr[0];
1089}
1090
1091
1092VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1093{
1094 return pVCpu->cpum.s.Guest.dr[1];
1095}
1096
1097
1098VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1099{
1100 return pVCpu->cpum.s.Guest.dr[2];
1101}
1102
1103
1104VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1105{
1106 return pVCpu->cpum.s.Guest.dr[3];
1107}
1108
1109
1110VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1111{
1112 return pVCpu->cpum.s.Guest.dr[6];
1113}
1114
1115
1116VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1117{
1118 return pVCpu->cpum.s.Guest.dr[7];
1119}
1120
1121
1122VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1123{
1124 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1125 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1126 if (iReg == 4 || iReg == 5)
1127 iReg += 2;
1128 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1129 return VINF_SUCCESS;
1130}
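/*
 * Illustration of the DR4/DR5 aliasing handled above: asking for register 5
 * returns the DR7 value, just like a guest "MOV reg, DR5" would with CR4.DE
 * clear.
 *
 * @code
 *     uint64_t uValue;
 *     int rc = CPUMGetGuestDRx(pVCpu, 5, &uValue); // uValue == guest DR7
 * @endcode
 */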
1131
1132
1133VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1134{
1135 return pVCpu->cpum.s.Guest.msrEFER;
1136}
1137
1138
1139/**
1140 * Looks up a CPUID leaf in the CPUID leaf array.
1141 *
1142 * @returns Pointer to the leaf if found, NULL if not.
1143 *
1144 * @param pVM Pointer to the cross context VM structure.
1145 * @param uLeaf The leaf to get.
1146 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1147 * isn't.
1148 */
1149PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf)
1150{
1151 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1152 if (iEnd)
1153 {
1154 unsigned iStart = 0;
1155 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1156 for (;;)
1157 {
1158 unsigned i = iStart + (iEnd - iStart) / 2U;
1159 if (uLeaf < paLeaves[i].uLeaf)
1160 {
1161 if (i <= iStart)
1162 return NULL;
1163 iEnd = i;
1164 }
1165 else if (uLeaf > paLeaves[i].uLeaf)
1166 {
1167 i += 1;
1168 if (i >= iEnd)
1169 return NULL;
1170 iStart = i;
1171 }
1172 else
1173 {
1174 uSubLeaf &= paLeaves[i].fSubLeafMask;
1175 if (uSubLeaf != paLeaves[i].uSubLeaf)
1176 {
1177 /* Find the right subleaf. We return the last one before
1178 uSubLeaf if we don't find an exact match. */
1179 if (uSubLeaf < paLeaves[i].uSubLeaf)
1180 while ( i > 0
1181 && uLeaf == paLeaves[i].uLeaf
1182 && uSubLeaf < paLeaves[i].uSubLeaf)
1183 i--;
1184 else
1185 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1186 && uLeaf == paLeaves[i + 1].uLeaf
1187 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1188 i++;
1189 }
1190 return &paLeaves[i];
1191 }
1192 }
1193 }
1194
1195 return NULL;
1196}
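/*
 * Minimal lookup sketch for cpumCpuIdGetLeaf; the leaf number and feature bit
 * are just examples (both are used elsewhere in this file), and passing 0 for
 * the subleaf is fine for leaves without subleaves because of the
 * fSubLeafMask handling above:
 *
 * @code
 *     PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
 *     bool fRdTscP = pLeaf && (pLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
 * @endcode
 */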
1197
1198
1199/**
1200 * Gets a CPUID leaf.
1201 *
1202 * @param pVCpu Pointer to the VMCPU.
1203 * @param iLeaf The CPUID leaf to get.
1204 * @param pEax Where to store the EAX value.
1205 * @param pEbx Where to store the EBX value.
1206 * @param pEcx Where to store the ECX value.
1207 * @param pEdx Where to store the EDX value.
1208 */
1209VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1210{
1211 PVM pVM = pVCpu->CTX_SUFF(pVM);
1212
1213 PCCPUMCPUID pCpuId;
1214 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1215 pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
1216 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1217 pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
1218 else if ( iLeaf - UINT32_C(0x40000000) < 0x100 /** @todo Fix this later: Hyper-V says 0x400000FF is the last valid leaf. */
1219 && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP)) /* Only report if HVP bit set. */
1220 {
1221 PCPUMCPUIDLEAF pHyperLeaf = cpumCpuIdGetLeaf(pVM, iLeaf, 0 /* uSubLeaf */);
1222 if (RT_LIKELY(pHyperLeaf))
1223 {
1224 *pEax = pHyperLeaf->uEax;
1225 *pEbx = pHyperLeaf->uEbx;
1226 *pEcx = pHyperLeaf->uEcx;
1227 *pEdx = pHyperLeaf->uEdx;
1228 }
1229 else
1230 {
1231 *pEax = *pEbx = *pEcx = *pEdx = 0;
1232 }
1233 return;
1234 }
1235 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1236 pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
1237 else
1238 pCpuId = &pVM->cpum.s.GuestCpuIdDef;
1239
1240 uint32_t cCurrentCacheIndex = *pEcx;
1241
1242 *pEax = pCpuId->eax;
1243 *pEbx = pCpuId->ebx;
1244 *pEcx = pCpuId->ecx;
1245 *pEdx = pCpuId->edx;
1246
1247 if ( iLeaf == 1)
1248 {
1249 /* Bits 31-24: Initial APIC ID */
1250 Assert(pVCpu->idCpu <= 255);
1251 *pEbx |= (pVCpu->idCpu << 24);
1252 }
1253
1254 if ( iLeaf == 4
1255 && cCurrentCacheIndex < 3
1256 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1257 {
1258 uint32_t type, level, sharing, linesize,
1259 partitions, associativity, sets, cores;
1260
1261 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1262 partitions = 1;
1263 /* These are only to keep the compiler quiet, as they always
1264 get overwritten below; the compiler ought to be able to figure that out. */
1265 sets = associativity = sharing = level = 1;
1266 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1267 switch (cCurrentCacheIndex)
1268 {
1269 case 0:
1270 type = 1;
1271 level = 1;
1272 sharing = 1;
1273 linesize = 64;
1274 associativity = 8;
1275 sets = 64;
1276 break;
1277 case 1:
1278 level = 1;
1279 type = 2;
1280 sharing = 1;
1281 linesize = 64;
1282 associativity = 8;
1283 sets = 64;
1284 break;
1285 default: /* shut up gcc.*/
1286 AssertFailed();
1287 case 2:
1288 level = 2;
1289 type = 3;
1290 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1291 linesize = 64;
1292 associativity = 24;
1293 sets = 4096;
1294 break;
1295 }
1296
1297 NOREF(type);
1298 *pEax |= ((cores - 1) << 26) |
1299 ((sharing - 1) << 14) |
1300 (level << 5) |
1301 1;
1302 *pEbx = (linesize - 1) |
1303 ((partitions - 1) << 12) |
1304 ((associativity - 1) << 22); /* -1 encoding */
1305 *pEcx = sets - 1;
1306 }
1307
1308 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1309}
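/*
 * Decoding sketch for the synthetic leaf 4 values built above (Intel guests
 * only, cache indexes 0..2). The "-1" encodings follow the Intel deterministic
 * cache parameters convention:
 *
 * @code
 *     uint32_t uEax, uEbx, uEcx, uEdx;
 *     uEcx = 2;                                       // cache index goes in via ECX
 *     CPUMGetGuestCpuId(pVCpu, 4, &uEax, &uEbx, &uEcx, &uEdx);
 *     uint32_t uLevel = (uEax >> 5) & 0x7;            // 2 = L2
 *     uint32_t cbLine = (uEbx & 0xfff) + 1;           // 64 bytes
 *     uint32_t cWays  = ((uEbx >> 22) & 0x3ff) + 1;   // 24-way
 *     uint32_t cSets  = uEcx + 1;                     // 4096 sets
 * @endcode
 */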
1310
1311/**
1312 * Gets a number of standard CPUID leafs.
1313 *
1314 * @returns Number of leafs.
1315 * @param pVM Pointer to the VM.
1316 * @remark Intended for PATM.
1317 */
1318VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
1319{
1320 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
1321}
1322
1323
1324/**
1325 * Gets a number of extended CPUID leafs.
1326 *
1327 * @returns Number of leafs.
1328 * @param pVM Pointer to the VM.
1329 * @remark Intended for PATM.
1330 */
1331VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
1332{
1333 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
1334}
1335
1336
1337/**
1338 * Gets a number of centaur CPUID leafs.
1339 *
1340 * @returns Number of leafs.
1341 * @param pVM Pointer to the VM.
1342 * @remark Intended for PATM.
1343 */
1344VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
1345{
1346 return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
1347}
1348
1349
1350/**
1351 * Sets a CPUID feature bit.
1352 *
1353 * @param pVM Pointer to the VM.
1354 * @param enmFeature The feature to set.
1355 */
1356VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1357{
1358 PCPUMCPUIDLEAF pLeaf;
1359
1360 switch (enmFeature)
1361 {
1362 /*
1363 * Set the APIC bit in both feature masks.
1364 */
1365 case CPUMCPUIDFEATURE_APIC:
1366 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1367 if (pLeaf)
1368 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
1369
1370 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1371 if ( pLeaf
1372 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1373 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1374
1375 pVM->cpum.s.GuestFeatures.fApic = 1;
1376 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n"));
1377 break;
1378
1379 /*
1380 * Set the x2APIC bit in the standard feature mask.
1381 */
1382 case CPUMCPUIDFEATURE_X2APIC:
1383 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1384 if (pLeaf)
1385 pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
1386 pVM->cpum.s.GuestFeatures.fX2Apic = 1;
1387 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
1388 break;
1389
1390 /*
1391 * Set the sysenter/sysexit bit in the standard feature mask.
1392 * Assumes the caller knows what it's doing! (host must support these)
1393 */
1394 case CPUMCPUIDFEATURE_SEP:
1395 if (!pVM->cpum.s.HostFeatures.fSysEnter)
1396 {
1397 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1398 return;
1399 }
1400
1401 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1402 if (pLeaf)
1403 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
1404 pVM->cpum.s.GuestFeatures.fSysEnter = 1;
1405 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
1406 break;
1407
1408 /*
1409 * Set the syscall/sysret bit in the extended feature mask.
1410 * Assumes the caller knows what it's doing! (host must support these)
1411 */
1412 case CPUMCPUIDFEATURE_SYSCALL:
1413 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1414 if ( !pLeaf
1415 || !pVM->cpum.s.HostFeatures.fSysCall)
1416 {
1417#if HC_ARCH_BITS == 32
1418 /* Intel CPUs do not seem to set X86_CPUID_EXT_FEATURE_EDX_SYSCALL in
1419 32-bit mode, even when the CPU is capable of doing so in 64-bit mode.
1420 Long mode requires syscall support. */
1421 if (!pVM->cpum.s.HostFeatures.fLongMode)
1422#endif
1423 {
1424 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
1425 return;
1426 }
1427 }
1428
1429 /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
1430 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
1431 pVM->cpum.s.GuestFeatures.fSysCall = 1;
1432 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
1433 break;
1434
1435 /*
1436 * Set the PAE bit in both feature masks.
1437 * Assumes the caller knows what it's doing! (host must support these)
1438 */
1439 case CPUMCPUIDFEATURE_PAE:
1440 if (!pVM->cpum.s.HostFeatures.fPae)
1441 {
1442 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
1443 return;
1444 }
1445
1446 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1447 if (pLeaf)
1448 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
1449
1450 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1451 if ( pLeaf
1452 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1453 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1454
1455 pVM->cpum.s.GuestFeatures.fPae = 1;
1456 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
1457 break;
1458
1459 /*
1460 * Set the LONG MODE bit in the extended feature mask.
1461 * Assumes the caller knows what it's doing! (host must support these)
1462 */
1463 case CPUMCPUIDFEATURE_LONG_MODE:
1464 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1465 if ( !pLeaf
1466 || !pVM->cpum.s.HostFeatures.fLongMode)
1467 {
1468 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
1469 return;
1470 }
1471
1472 /* Valid for both Intel and AMD. */
1473 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1474 pVM->cpum.s.GuestFeatures.fLongMode = 1;
1475 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
1476 break;
1477
1478 /*
1479 * Set the NX/XD bit in the extended feature mask.
1480 * Assumes the caller knows what it's doing! (host must support these)
1481 */
1482 case CPUMCPUIDFEATURE_NX:
1483 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1484 if ( !pLeaf
1485 || !pVM->cpum.s.HostFeatures.fNoExecute)
1486 {
1487 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
1488 return;
1489 }
1490
1491 /* Valid for both Intel and AMD. */
1492 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
1493 pVM->cpum.s.GuestFeatures.fNoExecute = 1;
1494 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
1495 break;
1496
1497
1498 /*
1499 * Set the LAHF/SAHF support in 64-bit mode.
1500 * Assumes the caller knows what it's doing! (host must support this)
1501 */
1502 case CPUMCPUIDFEATURE_LAHF:
1503 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1504 if ( !pLeaf
1505 || !pVM->cpum.s.HostFeatures.fLahfSahf)
1506 {
1507 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
1508 return;
1509 }
1510
1511 /* Valid for both Intel and AMD. */
1512 pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1513 pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
1514 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1515 break;
1516
1517 /*
1518 * Set the page attribute table bit. This is an alternative page-level
1519 * cache control that doesn't matter much when everything is
1520 * virtualized, though it may when passing through device memory.
1521 */
1522 case CPUMCPUIDFEATURE_PAT:
1523 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1524 if (pLeaf)
1525 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
1526
1527 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1528 if ( pLeaf
1529 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1530 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1531
1532 pVM->cpum.s.GuestFeatures.fPat = 1;
1533 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
1534 break;
1535
1536 /*
1537 * Set the RDTSCP support bit.
1538 * Assumes the caller knows what it's doing! (host must support this)
1539 */
1540 case CPUMCPUIDFEATURE_RDTSCP:
1541 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1542 if ( !pLeaf
1543 || !pVM->cpum.s.HostFeatures.fRdTscP
1544 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1545 {
1546 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1547 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
1548 return;
1549 }
1550
1551 /* Valid for both Intel and AMD. */
1552 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1553 pVM->cpum.s.GuestFeatures.fRdTscP = 1;
1554 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1555 break;
1556
1557 /*
1558 * Set the Hypervisor Present bit in the standard feature mask.
1559 */
1560 case CPUMCPUIDFEATURE_HVP:
1561 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1562 if (pLeaf)
1563 pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
1564 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
1565 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
1566 break;
1567
1568 /*
1569 * Set the MWAIT Extensions Present bit in the MWAIT/MONITOR leaf.
1570 * This currently includes the Present bit and MWAITBREAK bit as well.
1571 */
1572 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1573 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005), 0);
1574 if ( !pLeaf
1575 || !pVM->cpum.s.HostFeatures.fMWaitExtensions)
1576 {
1577 LogRel(("CPUM: WARNING! Can't turn on MWAIT Extensions when the host doesn't support it!\n"));
1578 return;
1579 }
1580
1581 /* Valid for both Intel and AMD. */
1582 pVM->cpum.s.aGuestCpuIdStd[5].ecx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
1583 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 1;
1584 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled MWAIT Extensions.\n"));
1585 break;
1586
1587 default:
1588 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1589 break;
1590 }
1591
1592 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1593 {
1594 PVMCPU pVCpu = &pVM->aCpus[i];
1595 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1596 }
1597}
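/*
 * Ring-3 configuration sketch for CPUMSetGuestCpuIdFeature (hypothetical
 * init-time caller; fAllow64BitGuests is an assumed config flag, and the host
 * support checks above may still veto individual features):
 *
 * @code
 *     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
 *     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
 *     if (fAllow64BitGuests)
 *         CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
 * @endcode
 */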
1598
1599
1600/**
1601 * Queries a CPUID feature bit.
1602 *
1603 * @returns boolean for feature presence
1604 * @param pVM Pointer to the VM.
1605 * @param enmFeature The feature to query.
1606 */
1607VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1608{
1609 switch (enmFeature)
1610 {
1611 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
1612 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
1613 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
1614 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
1615 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
1616 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
1617 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
1618 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
1619 case CPUMCPUIDFEATURE_PAT: return pVM->cpum.s.GuestFeatures.fPat;
1620 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
1621 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
1622 case CPUMCPUIDFEATURE_MWAIT_EXTS: return pVM->cpum.s.GuestFeatures.fMWaitExtensions;
1623
1624 case CPUMCPUIDFEATURE_INVALID:
1625 case CPUMCPUIDFEATURE_32BIT_HACK:
1626 break;
1627 }
1628 AssertFailed();
1629 return false;
1630}
1631
1632
1633/**
1634 * Clears a CPUID feature bit.
1635 *
1636 * @param pVM Pointer to the VM.
1637 * @param enmFeature The feature to clear.
1638 */
1639VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1640{
1641 PCPUMCPUIDLEAF pLeaf;
1642 switch (enmFeature)
1643 {
1644 case CPUMCPUIDFEATURE_APIC:
1645 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1646 if (pLeaf)
1647 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1648
1649 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1650 if ( pLeaf
1651 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1652 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1653
1654 pVM->cpum.s.GuestFeatures.fApic = 0;
1655 Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
1656 break;
1657
1658 case CPUMCPUIDFEATURE_X2APIC:
1659 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1660 if (pLeaf)
1661 pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1662 pVM->cpum.s.GuestFeatures.fX2Apic = 0;
1663 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
1664 break;
1665
1666 case CPUMCPUIDFEATURE_PAE:
1667 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1668 if (pLeaf)
1669 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
1670
1671 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1672 if ( pLeaf
1673 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1674 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1675
1676 pVM->cpum.s.GuestFeatures.fPae = 0;
1677 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
1678 break;
1679
1680 case CPUMCPUIDFEATURE_PAT:
1681 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1682 if (pLeaf)
1683 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
1684
1685 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1686 if ( pLeaf
1687 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1688 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1689
1690 pVM->cpum.s.GuestFeatures.fPat = 0;
1691 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
1692 break;
1693
1694 case CPUMCPUIDFEATURE_LONG_MODE:
1695 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1696 if (pLeaf)
1697 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1698 pVM->cpum.s.GuestFeatures.fLongMode = 0;
1699 break;
1700
1701 case CPUMCPUIDFEATURE_LAHF:
1702 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1703 if (pLeaf)
1704 pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1705 pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
1706 break;
1707
1708 case CPUMCPUIDFEATURE_RDTSCP:
1709 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1710 if (pLeaf)
1711 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1712 pVM->cpum.s.GuestFeatures.fRdTscP = 0;
1713 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
1714 break;
1715
1716 case CPUMCPUIDFEATURE_HVP:
1717 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1718 if (pLeaf)
1719 pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
1720 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
1721 break;
1722
1723 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1724 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005), 0);
1725 if (pLeaf)
1726 pVM->cpum.s.aGuestCpuIdStd[5].ecx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
1727 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 0;
1728 Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
1729 break;
1730
1731 default:
1732 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1733 break;
1734 }
1735
1736 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1737 {
1738 PVMCPU pVCpu = &pVM->aCpus[i];
1739 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1740 }
1741}
1742
1743
1744/**
1745 * Gets the host CPU vendor.
1746 *
1747 * @returns CPU vendor.
1748 * @param pVM Pointer to the VM.
1749 */
1750VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1751{
1752 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1753}
1754
1755
1756/**
1757 * Gets the CPU vendor.
1758 *
1759 * @returns CPU vendor.
1760 * @param pVM Pointer to the VM.
1761 */
1762VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1763{
1764 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1765}
1766
1767
1768VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1769{
1770 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1771 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1772}
1773
1774
1775VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1776{
1777 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1778 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1779}
1780
1781
1782VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1783{
1784 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1785 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1786}
1787
1788
1789VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1790{
1791 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1792 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1793}
1794
1795
1796VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1797{
1798 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1799 return VINF_SUCCESS; /* No need to recalc. */
1800}
1801
1802
1803VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1804{
1805 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1806 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1807}
1808
1809
1810VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1811{
1812 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1813 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1814 if (iReg == 4 || iReg == 5)
1815 iReg += 2;
1816 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1817 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1818}
1819
1820
1821/**
1822 * Recalculates the hypervisor DRx register values based on current guest
1823 * registers and DBGF breakpoints, updating changed registers depending on the
1824 * context.
1825 *
1826 * This is called whenever a guest DRx register is modified (any context) and
1827 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1828 *
1829 * In raw-mode context this function will reload any (hyper) DRx registers which
1830 * come out with a different value. It may also have to save the host debug
1831 * registers if that hasn't been done already. In this context though, we'll
1832 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1833 * are only important when breakpoints are actually enabled.
1834 *
1835 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1836 * reloaded by the HM code if it changes. Furthermore, we will only use the
1837 * combined register set when the VBox debugger is actually using hardware BPs;
1838 * when it isn't, we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1839 * concern us here).
1840 *
1841 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1842 * all the time.
1843 *
1844 * @returns VINF_SUCCESS.
1845 * @param pVCpu Pointer to the VMCPU.
1846 * @param iGstReg The guest debug register number that was modified.
1847 * UINT8_MAX if no guest register was modified.
1848 * @param fForceHyper Used in HM to force hyper registers because of single
1849 * stepping.
1850 */
1851VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1852{
1853 PVM pVM = pVCpu->CTX_SUFF(pVM);
1854
1855 /*
1856 * Compare the DR7s first.
1857 *
1858 * We only care about the enabled flags. GD is virtualized when we
1859 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1860 * always have the LE and GE bits set, so no need to check and disable
1861 * stuff if they're cleared like we have to for the guest DR7.
1862 */
1863 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1864 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1865 uGstDr7 = 0;
1866 else if (!(uGstDr7 & X86_DR7_LE))
1867 uGstDr7 &= ~X86_DR7_LE_ALL;
1868 else if (!(uGstDr7 & X86_DR7_GE))
1869 uGstDr7 &= ~X86_DR7_GE_ALL;
1870
1871 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1872
1873#ifdef IN_RING0
1874 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1875 fForceHyper = true;
1876#endif
1877 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
1878 {
1879 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1880#ifdef IN_RC
1881 bool const fHmEnabled = false;
1882#elif defined(IN_RING3)
1883 bool const fHmEnabled = HMIsEnabled(pVM);
1884#endif
1885
1886 /*
1887 * Ok, something is enabled. Recalc each of the breakpoints, taking
1888 * the VM debugger ones over the guest ones. In raw-mode context we will
1889 * not allow breakpoints with values inside the hypervisor area.
1890 */
1891 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1892
1893 /* bp 0 */
1894 RTGCUINTREG uNewDr0;
1895 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1896 {
1897 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1898 uNewDr0 = DBGFBpGetDR0(pVM);
1899 }
1900 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1901 {
1902 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1903#ifndef IN_RING0
1904 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1905 uNewDr0 = 0;
1906 else
1907#endif
1908 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1909 }
1910 else
1911 uNewDr0 = 0;
1912
1913 /* bp 1 */
1914 RTGCUINTREG uNewDr1;
1915 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1916 {
1917 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1918 uNewDr1 = DBGFBpGetDR1(pVM);
1919 }
1920 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1921 {
1922 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1923#ifndef IN_RING0
1924 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1925 uNewDr1 = 0;
1926 else
1927#endif
1928 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1929 }
1930 else
1931 uNewDr1 = 0;
1932
1933 /* bp 2 */
1934 RTGCUINTREG uNewDr2;
1935 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1936 {
1937 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1938 uNewDr2 = DBGFBpGetDR2(pVM);
1939 }
1940 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1941 {
1942 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1943#ifndef IN_RING0
1944 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1945 uNewDr2 = 0;
1946 else
1947#endif
1948 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1949 }
1950 else
1951 uNewDr2 = 0;
1952
1953 /* bp 3 */
1954 RTGCUINTREG uNewDr3;
1955 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1956 {
1957 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1958 uNewDr3 = DBGFBpGetDR3(pVM);
1959 }
1960 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1961 {
1962 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1963#ifndef IN_RING0
1964 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1965 uNewDr3 = 0;
1966 else
1967#endif
1968 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1969 }
1970 else
1971 uNewDr3 = 0;
1972
1973 /*
1974 * Apply the updates.
1975 */
1976#ifdef IN_RC
1977 /* Make sure to save host registers first. */
1978 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1979 {
1980 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1981 {
1982 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1983 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1984 }
1985 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1986 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1987 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1988 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1989 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1990
1991 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1992 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1993 ASMSetDR0(uNewDr0);
1994 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
1995 ASMSetDR1(uNewDr1);
1996 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
1997 ASMSetDR2(uNewDr2);
1998 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
1999 ASMSetDR3(uNewDr3);
2000 ASMSetDR6(X86_DR6_INIT_VAL);
2001 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
2002 ASMSetDR7(uNewDr7);
2003 }
2004 else
2005#endif
2006 {
2007 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
2008 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2009 CPUMSetHyperDR3(pVCpu, uNewDr3);
2010 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2011 CPUMSetHyperDR2(pVCpu, uNewDr2);
2012 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2013 CPUMSetHyperDR1(pVCpu, uNewDr1);
2014 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2015 CPUMSetHyperDR0(pVCpu, uNewDr0);
2016 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2017 CPUMSetHyperDR7(pVCpu, uNewDr7);
2018 }
2019 }
2020#ifdef IN_RING0
2021 else if (CPUMIsGuestDebugStateActive(pVCpu))
2022 {
2023 /*
2024 * Reload the register that was modified. Normally this won't happen
2025 * as we won't intercept DRx writes when not having the hyper debug
2026 * state loaded, but in case we do for some reason we'll simply deal
2027 * with it.
2028 */
2029 switch (iGstReg)
2030 {
2031 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
2032 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
2033 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
2034 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
2035 default:
2036 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
2037 }
2038 }
2039#endif
2040 else
2041 {
2042 /*
2043 * No active debug state any more. In raw-mode this means we have to
2044 * make sure DR7 has everything disabled now, if we armed it already.
2045 * In ring-0 we might end up here when just single stepping.
2046 */
2047#if defined(IN_RC) || defined(IN_RING0)
2048 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
2049 {
2050# ifdef IN_RC
2051 ASMSetDR7(X86_DR7_INIT_VAL);
2052# endif
2053 if (pVCpu->cpum.s.Hyper.dr[0])
2054 ASMSetDR0(0);
2055 if (pVCpu->cpum.s.Hyper.dr[1])
2056 ASMSetDR1(0);
2057 if (pVCpu->cpum.s.Hyper.dr[2])
2058 ASMSetDR2(0);
2059 if (pVCpu->cpum.s.Hyper.dr[3])
2060 ASMSetDR3(0);
2061 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
2062 }
2063#endif
2064 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2065
2066 /* Clear all the registers. */
2067 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
2068 pVCpu->cpum.s.Hyper.dr[3] = 0;
2069 pVCpu->cpum.s.Hyper.dr[2] = 0;
2070 pVCpu->cpum.s.Hyper.dr[1] = 0;
2071 pVCpu->cpum.s.Hyper.dr[0] = 0;
2072
2073 }
2074 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2075 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2076 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2077 pVCpu->cpum.s.Hyper.dr[7]));
2078
2079 return VINF_SUCCESS;
2080}
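
/*
 * The per-breakpoint merge above repeats the same pattern four times.  A
 * minimal, standalone sketch of that pattern follows (locally defined masks
 * mirroring the architectural DR7 layout and assumed names, not the VBox
 * macros): the VM debugger's breakpoint wins, otherwise the guest's settings
 * are mirrored, and bit 10 plus LE/GE are always set.
 */
#if 0 /* Illustrative sketch only, not part of the original file. */
# include <stdint.h>

# define SKETCH_DR7_RA1_MASK    UINT32_C(0x00000400)                /* Bit 10 always reads as 1. */
# define SKETCH_DR7_LE_GE       UINT32_C(0x00000300)                /* LE (bit 8) | GE (bit 9). */
# define SKETCH_DR7_L_G(iBp)    (UINT32_C(0x3) << ((iBp) * 2))      /* Ln/Gn enable pair. */
# define SKETCH_DR7_RW_LEN(iBp) (UINT32_C(0xf) << (16 + (iBp) * 4)) /* RWn + LENn field. */

static uint32_t sketchMergeDr7Bp(uint32_t uDbgfDr7, uint32_t uGstDr7, unsigned iBp)
{
    uint32_t uNewDr7 = SKETCH_DR7_RA1_MASK | SKETCH_DR7_LE_GE;
    if (uDbgfDr7 & SKETCH_DR7_L_G(iBp))      /* VM debugger breakpoint takes precedence... */
        uNewDr7 |= uDbgfDr7 & (SKETCH_DR7_L_G(iBp) | SKETCH_DR7_RW_LEN(iBp));
    else if (uGstDr7 & SKETCH_DR7_L_G(iBp))  /* ...otherwise mirror the guest's breakpoint. */
        uNewDr7 |= uGstDr7 & (SKETCH_DR7_L_G(iBp) | SKETCH_DR7_RW_LEN(iBp));
    return uNewDr7;
}
#endif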
2081
2082
2083/**
2084 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2085 *
2086 * @returns true if NXE is enabled, false otherwise.
2087 * @param pVCpu Pointer to the VMCPU.
2088 */
2089VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2090{
2091 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2092}
2093
2094
2095/**
2096 * Tests if the guest has the Page Size Extension enabled (PSE).
2097 *
2098 * @returns true if PSE (or PAE, which implies large pages) is enabled, false otherwise.
2099 * @param pVCpu Pointer to the VMCPU.
2100 */
2101VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2102{
2103 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2104 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2105}
2106
2107
2108/**
2109 * Tests if the guest has paging enabled (PG).
2110 *
2111 * @returns true if paging is enabled, false otherwise.
2112 * @param pVCpu Pointer to the VMCPU.
2113 */
2114VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2115{
2116 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2117}
2118
2119
2120/**
2121 * Tests if the guest has write protection for supervisor-mode accesses enabled (CR0.WP).
2122 *
2123 * @returns true if CR0.WP is set, false otherwise.
2124 * @param pVCpu Pointer to the VMCPU.
2125 */
2126VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2127{
2128 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2129}
2130
2131
2132/**
2133 * Tests if the guest is running in real mode or not.
2134 *
2135 * @returns true if in real mode, otherwise false.
2136 * @param pVCpu Pointer to the VMCPU.
2137 */
2138VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2139{
2140 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2141}
2142
2143
2144/**
2145 * Tests if the guest is running in real or virtual 8086 mode.
2146 *
2147 * @returns @c true if it is, @c false if not.
2148 * @param pVCpu Pointer to the VMCPU.
2149 */
2150VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2151{
2152 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2153 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2154}
2155
2156
2157/**
2158 * Tests if the guest is running in protected mode or not.
2159 *
2160 * @returns true if in protected mode, otherwise false.
2161 * @param pVCpu Pointer to the VMCPU.
2162 */
2163VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2164{
2165 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2166}
2167
2168
2169/**
2170 * Tests if the guest is running in paged protected mode or not.
2171 *
2172 * @returns true if in paged protected mode, otherwise false.
2173 * @param pVCpu Pointer to the VMCPU.
2174 */
2175VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2176{
2177 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2178}
2179
2180
2181/**
2182 * Tests if the guest is running in long mode or not.
2183 *
2184 * @returns true if in long mode, otherwise false.
2185 * @param pVCpu Pointer to the VMCPU.
2186 */
2187VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2188{
2189 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2190}
2191
2192
2193/**
2194 * Tests if the guest is running in PAE mode or not.
2195 *
2196 * @returns true if in PAE mode, otherwise false.
2197 * @param pVCpu Pointer to the VMCPU.
2198 */
2199VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2200{
2201 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
2202 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
2203 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2204 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2205 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2206}
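
/*
 * For reference, a standalone restatement of the distinction above (locally
 * defined masks mirroring the architectural bit positions and assumed names,
 * not the VBox definitions): PAE paging needs CR4.PAE and CR0.PG with
 * EFER.LMA clear; once LMA is set the CPU is in long mode instead.
 */
#if 0 /* Illustrative sketch only, not part of the original file. */
# include <stdbool.h>
# include <stdint.h>

# define SKETCH_CR0_PG   UINT64_C(0x80000000)  /* CR0 bit 31 */
# define SKETCH_CR4_PAE  UINT64_C(0x00000020)  /* CR4 bit 5  */
# define SKETCH_EFER_LMA UINT64_C(0x00000400)  /* EFER bit 10 */

static bool sketchIsPaeMode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{
    return (cr4 & SKETCH_CR4_PAE)
        && (cr0 & SKETCH_CR0_PG)
        && !(efer & SKETCH_EFER_LMA);
}
#endif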
2207
2208
2209/**
2210 * Tests if the guest is running in 64-bit mode or not.
2211 *
2212 * @returns true if in 64-bit protected mode, otherwise false.
2213 * @param pVCpu The current virtual CPU.
2214 */
2215VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2216{
2217 if (!CPUMIsGuestInLongMode(pVCpu))
2218 return false;
2219 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2220 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2221}
2222
2223
2224/**
2225 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2226 * registers.
2227 *
2228 * @returns true if in 64-bit protected mode, otherwise false.
2229 * @param pCtx Pointer to the current guest CPU context.
2230 */
2231VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2232{
2233 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2234}
2235
2236#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2237
2238/**
2239 * Tests whether we've entered raw-mode.
2240 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2241 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2242 * @param pVCpu The current virtual CPU.
2243 */
2244VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2245{
2246 return pVCpu->cpum.s.fRawEntered;
2247}
2248
2249/**
2250 * Transforms the guest CPU state to raw-ring mode.
2251 *
2252 * This function will change any of the CS and SS registers with DPL=0 to DPL=1.
2253 *
2254 * @returns VBox status. (recompiler failure)
2255 * @param pVCpu Pointer to the VMCPU.
2256 * @param pCtxCore The context core (for trap usage).
2257 * @see @ref pg_raw
2258 */
2259VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2260{
2261 PVM pVM = pVCpu->CTX_SUFF(pVM);
2262
2263 Assert(!pVCpu->cpum.s.fRawEntered);
2264 Assert(!pVCpu->cpum.s.fRemEntered);
2265 if (!pCtxCore)
2266 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
2267
2268 /*
2269 * Are we in Ring-0?
2270 */
2271 if ( pCtxCore->ss.Sel
2272 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
2273 && !pCtxCore->eflags.Bits.u1VM)
2274 {
2275 /*
2276 * Enter execution mode.
2277 */
2278 PATMRawEnter(pVM, pCtxCore);
2279
2280 /*
2281 * Set CPL to Ring-1.
2282 */
2283 pCtxCore->ss.Sel |= 1;
2284 if ( pCtxCore->cs.Sel
2285 && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
2286 pCtxCore->cs.Sel |= 1;
2287 }
2288 else
2289 {
2290# ifdef VBOX_WITH_RAW_RING1
2291 if ( EMIsRawRing1Enabled(pVM)
2292 && !pCtxCore->eflags.Bits.u1VM
2293 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
2294 {
2295 /* Set CPL to Ring-2. */
2296 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
2297 if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
2298 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
2299 }
2300# else
2301 AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
2302 ("ring-1 code not supported\n"));
2303# endif
2304 /*
2305 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2306 */
2307 PATMRawEnter(pVM, pCtxCore);
2308 }
2309
2310 /*
2311 * Assert sanity.
2312 */
2313 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2314 AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0,
2315 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
2316 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
2317
2318 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
2319
2320 pVCpu->cpum.s.fRawEntered = true;
2321 return VINF_SUCCESS;
2322}
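
/*
 * A compact, standalone sketch of the selector transformation above (assumed
 * names and a locally defined RPL mask): a non-null ring-0 CS/SS selector
 * gets its RPL bumped to 1 on raw-mode entry, and CPUMRawLeave below applies
 * the inverse.
 */
#if 0 /* Illustrative sketch only, not part of the original file. */
# include <stdint.h>

# define SKETCH_SEL_RPL UINT16_C(0x0003)    /* Low two selector bits hold the RPL. */

static uint16_t sketchRawEnterSel(uint16_t Sel)
{
    if (Sel && (Sel & SKETCH_SEL_RPL) == 0) /* Non-null ring-0 selector... */
        Sel |= 1;                           /* ...executes at ring-1 in raw mode. */
    return Sel;
}

static uint16_t sketchRawLeaveSel(uint16_t Sel)
{
    if ((Sel & SKETCH_SEL_RPL) == 1)        /* Undo the RPL=1 adjustment. */
        Sel &= (uint16_t)~SKETCH_SEL_RPL;
    return Sel;
}
#endif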
2323
2324
2325/**
2326 * Transforms the guest CPU state from raw-ring mode to correct values.
2327 *
2328 * This function will change any selector registers with DPL=1 to DPL=0.
2329 *
2330 * @returns Adjusted rc.
2331 * @param pVCpu Pointer to the VMCPU.
2332 * @param rc Raw mode return code
2333 * @param pCtxCore The context core (for trap usage).
2334 * @see @ref pg_raw
2335 */
2336VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
2337{
2338 PVM pVM = pVCpu->CTX_SUFF(pVM);
2339
2340 /*
2341 * Don't leave if we've already left (in RC).
2342 */
2343 Assert(!pVCpu->cpum.s.fRemEntered);
2344 if (!pVCpu->cpum.s.fRawEntered)
2345 return rc;
2346 pVCpu->cpum.s.fRawEntered = false;
2347
2348 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2349 if (!pCtxCore)
2350 pCtxCore = CPUMCTX2CORE(pCtx);
2351 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss.Sel & X86_SEL_RPL));
2352 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL),
2353 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
2354
2355 /*
2356 * Are we executing in raw ring-1?
2357 */
2358 if ( (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
2359 && !pCtxCore->eflags.Bits.u1VM)
2360 {
2361 /*
2362 * Leave execution mode.
2363 */
2364 PATMRawLeave(pVM, pCtxCore, rc);
2365 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2366 /** @todo See what happens if we remove this. */
2367 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
2368 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
2369 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
2370 pCtxCore->es.Sel &= ~X86_SEL_RPL;
2371 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
2372 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
2373 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
2374 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
2375
2376 /*
2377 * Ring-1 selector => Ring-0.
2378 */
2379 pCtxCore->ss.Sel &= ~X86_SEL_RPL;
2380 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
2381 pCtxCore->cs.Sel &= ~X86_SEL_RPL;
2382 }
2383 else
2384 {
2385 /*
2386 * PATM is taking care of the IOPL and IF flags for us.
2387 */
2388 PATMRawLeave(pVM, pCtxCore, rc);
2389 if (!pCtxCore->eflags.Bits.u1VM)
2390 {
2391# ifdef VBOX_WITH_RAW_RING1
2392 if ( EMIsRawRing1Enabled(pVM)
2393 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)
2394 {
2395 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2396 /** @todo See what happens if we remove this. */
2397 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 2)
2398 pCtxCore->ds.Sel = (pCtxCore->ds.Sel & ~X86_SEL_RPL) | 1;
2399 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 2)
2400 pCtxCore->es.Sel = (pCtxCore->es.Sel & ~X86_SEL_RPL) | 1;
2401 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 2)
2402 pCtxCore->fs.Sel = (pCtxCore->fs.Sel & ~X86_SEL_RPL) | 1;
2403 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 2)
2404 pCtxCore->gs.Sel = (pCtxCore->gs.Sel & ~X86_SEL_RPL) | 1;
2405
2406 /*
2407 * Ring-2 selector => Ring-1.
2408 */
2409 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 1;
2410 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 2)
2411 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 1;
2412 }
2413 else
2414 {
2415# endif
2416 /** @todo See what happens if we remove this. */
2417 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
2418 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
2419 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
2420 pCtxCore->es.Sel &= ~X86_SEL_RPL;
2421 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
2422 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
2423 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
2424 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
2425# ifdef VBOX_WITH_RAW_RING1
2426 }
2427# endif
2428 }
2429 }
2430
2431 return rc;
2432}
2433
2434#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2435
2436/**
2437 * Updates the EFLAGS while we're in raw-mode.
2438 *
2439 * @param pVCpu Pointer to the VMCPU.
2440 * @param fEfl The new EFLAGS value.
2441 */
2442VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2443{
2444#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2445 if (pVCpu->cpum.s.fRawEntered)
2446 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest), fEfl);
2447 else
2448#endif
2449 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2450}
2451
2452
2453/**
2454 * Gets the EFLAGS while we're in raw-mode.
2455 *
2456 * @returns The eflags.
2457 * @param pVCpu Pointer to the current virtual CPU.
2458 */
2459VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2460{
2461#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2462 if (pVCpu->cpum.s.fRawEntered)
2463 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest));
2464#endif
2465 return pVCpu->cpum.s.Guest.eflags.u32;
2466}
2467
2468
2469/**
2470 * Sets the specified changed flags (CPUM_CHANGED_*).
2471 *
2472 * @param pVCpu Pointer to the current virtual CPU.
 * @param fChangedFlags The changed flags (CPUM_CHANGED_* defines).
2473 */
2474VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2475{
2476 pVCpu->cpum.s.fChanged |= fChangedFlags;
2477}
2478
2479
2480/**
2481 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
2482 * @returns true if supported.
2483 * @returns false if not supported.
2484 * @param pVM Pointer to the VM.
2485 */
2486VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2487{
2488 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2489}
2490
2491
2492/**
2493 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2494 * @returns true if used.
2495 * @returns false if not used.
2496 * @param pVM Pointer to the VM.
2497 */
2498VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2499{
2500 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2501}
2502
2503
2504/**
2505 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2506 * @returns true if used.
2507 * @returns false if not used.
2508 * @param pVM Pointer to the VM.
2509 */
2510VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2511{
2512 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2513}
2514
2515#ifdef IN_RC
2516
2517/**
2518 * Lazily sync in the FPU/XMM state.
2519 *
2520 * @returns VBox status code.
2521 * @param pVCpu Pointer to the VMCPU.
2522 */
2523VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2524{
2525 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2526}
2527
2528#endif /* IN_RC */
2529
2530/**
2531 * Checks if we activated the FPU/XMM state of the guest OS.
2532 * @returns true if we did.
2533 * @returns false if not.
2534 * @param pVCpu Pointer to the VMCPU.
2535 */
2536VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2537{
2538 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
2539}
2540
2541
2542/**
2543 * Deactivate the FPU/XMM state of the guest OS.
2544 * @param pVCpu Pointer to the VMCPU.
2545 *
2546 * @todo r=bird: Why is this needed? Looks like a workaround for mishandled
2547 * FPU state management.
2548 */
2549VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2550{
2551 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU));
2552 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2553}
2554
2555
2556/**
2557 * Checks if the guest debug state is active.
2558 *
2559 * @returns boolean
2560 * @param pVCpu Pointer to the VMCPU.
2561 */
2562VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2563{
2564 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2565}
2566
2567
2568/**
2569 * Checks if the guest debug state is to be made active during the world-switch
2570 * (currently only used for the 32->64 switcher case).
2571 *
2572 * @returns boolean
2573 * @param pVCpu Pointer to the VMCPU.
2574 */
2575VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2576{
2577 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2578}
2579
2580
2581/**
2582 * Checks if the hyper debug state is active.
2583 *
2584 * @returns boolean
2585 * @param pVCpu Pointer to the VMCPU.
2586 */
2587VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2588{
2589 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2590}
2591
2592
2593/**
2594 * Checks if the hyper debug state is to be made active during the world-switch
2595 * (currently only used for the 32->64 switcher case).
2596 *
2597 * @returns boolean
2598 * @param pVCpu Pointer to the VMCPU.
2599 */
2600VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2601{
2602 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2603}
2604
2605
2606/**
2607 * Mark the guest's debug state as inactive.
2608 *
2609 * @param pVCpu Pointer to the VMCPU.
2611 * @todo This API doesn't make sense any more.
2612 */
2613VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2614{
2615 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2616}
2617
2618
2619/**
2620 * Get the current privilege level of the guest.
2621 *
2622 * @returns CPL
2623 * @param pVCpu Pointer to the current virtual CPU.
2624 */
2625VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2626{
2627 /*
2628 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2629 *
2630 * Note! We used to check CS.DPL here, assuming it was always equal to
2631 * CPL even if a conforming segment was loaded. But this turned out to
2632 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2633 * during install after a far call to ring 2 with VT-x. Then on newer
2634 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2635 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2636 *
2637 * So, forget CS.DPL, always use SS.DPL.
2638 *
2639 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2640 * isn't necessarily equal if the segment is conforming.
2641 * See section 4.11.1 in the AMD manual.
2642 *
2643 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2644 * right after real->prot mode switch and when in V8086 mode? That
2645 * section says the RPL specified in a direct transfer (call, jmp,
2646 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2647 * it would be impossible for an exception handler or the iret
2648 * instruction to figure out whether SS:ESP are part of the frame
2649 * or not. A VBox or qemu bug must've led to this misconception.
2650 *
2651 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2652 * selector into SS with an RPL other than the CPL when CPL != 3 and
2653 * we're in 64-bit mode. The intel dev box doesn't allow this, on
2654 * RPL = CPL. Weird.
2655 */
2656 uint32_t uCpl;
2657 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2658 {
2659 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2660 {
2661 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2662 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2663 else
2664 {
2665 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2666#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2667# ifdef VBOX_WITH_RAW_RING1
2668 if (pVCpu->cpum.s.fRawEntered)
2669 {
2670 if ( uCpl == 2
2671 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2672 uCpl = 1;
2673 else if (uCpl == 1)
2674 uCpl = 0;
2675 }
2676 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2677# else
2678 if (uCpl == 1)
2679 uCpl = 0;
2680# endif
2681#endif
2682 }
2683 }
2684 else
2685 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2686 }
2687 else
2688 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2689 return uCpl;
2690}
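
/*
 * A condensed, standalone sketch of the decision tree above (assumed names,
 * a locally defined CR0.PE mask, and the raw-mode RPL adjustments omitted):
 * real mode yields CPL 0, V8086 mode yields 3, and otherwise SS.DPL is the CPL.
 */
#if 0 /* Illustrative sketch only, not part of the original file. */
# include <stdint.h>

# define SKETCH_CR0_PE UINT64_C(0x00000001)     /* CR0 bit 0 */

static uint32_t sketchGetCpl(uint64_t cr0, int fV86, uint32_t uSsDpl)
{
    if (!(cr0 & SKETCH_CR0_PE))
        return 0;       /* Real mode. */
    if (fV86)
        return 3;       /* Virtual-8086 code always runs at CPL 3. */
    return uSsDpl;      /* Protected/long mode: SS.DPL is authoritative. */
}
#endif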
2691
2692
2693/**
2694 * Gets the current guest CPU mode.
2695 *
2696 * If paging mode is what you need, check out PGMGetGuestMode().
2697 *
2698 * @returns The CPU mode.
2699 * @param pVCpu Pointer to the VMCPU.
2700 */
2701VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2702{
2703 CPUMMODE enmMode;
2704 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2705 enmMode = CPUMMODE_REAL;
2706 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2707 enmMode = CPUMMODE_PROTECTED;
2708 else
2709 enmMode = CPUMMODE_LONG;
2710
2711 return enmMode;
2712}
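
/*
 * A standalone restatement of the classification above (assumed names and
 * locally defined masks): CR0.PE clear means real mode, EFER.LMA clear means
 * protected mode (which includes PAE paging), and otherwise long mode.
 */
#if 0 /* Illustrative sketch only, not part of the original file. */
# include <stdint.h>

# define SKETCH_MODE_CR0_PE   UINT64_C(0x00000001)  /* CR0 bit 0  */
# define SKETCH_MODE_EFER_LMA UINT64_C(0x00000400)  /* EFER bit 10 */

typedef enum SKETCHMODE { SKETCH_MODE_REAL, SKETCH_MODE_PROTECTED, SKETCH_MODE_LONG } SKETCHMODE;

static SKETCHMODE sketchGetMode(uint64_t cr0, uint64_t efer)
{
    if (!(cr0 & SKETCH_MODE_CR0_PE))
        return SKETCH_MODE_REAL;
    if (!(efer & SKETCH_MODE_EFER_LMA))
        return SKETCH_MODE_PROTECTED;
    return SKETCH_MODE_LONG;
}
#endif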
2713
2714
2715/**
2716 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
2717 *
2718 * @returns 16, 32 or 64.
2719 * @param pVCpu The current virtual CPU.
2720 */
2721VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2722{
2723 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2724 return 16;
2725
2726 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2727 {
2728 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2729 return 16;
2730 }
2731
2732 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2733 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2734 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2735 return 64;
2736
2737 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2738 return 32;
2739
2740 return 16;
2741}
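
/*
 * The same decision reduced to a standalone sketch (assumed names; the flag
 * parameters stand in for CR0.PE, EFLAGS.VM, EFER.LMA and the hidden CS.L /
 * CS.D attribute bits used above): 64-bit requires long mode plus CS.L,
 * otherwise CS.D selects the 32- vs 16-bit default operand size.
 */
#if 0 /* Illustrative sketch only, not part of the original file. */
static unsigned sketchGetCodeBits(int fProtected, int fV86, int fLongModeActive,
                                  int fCsLong, int fCsDefBig)
{
    if (!fProtected || fV86)
        return 16;                  /* Real and V8086 code is 16-bit. */
    if (fLongModeActive && fCsLong)
        return 64;                  /* Long mode with CS.L set. */
    return fCsDefBig ? 32 : 16;     /* CS.D picks the default operand size. */
}
#endif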
2742
2743
2744VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2745{
2746 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2747 return DISCPUMODE_16BIT;
2748
2749 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2750 {
2751 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2752 return DISCPUMODE_16BIT;
2753 }
2754
2755 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2756 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2757 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2758 return DISCPUMODE_64BIT;
2759
2760 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2761 return DISCPUMODE_32BIT;
2762
2763 return DISCPUMODE_16BIT;
2764}
2765