VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@54695

Last change on this file since 54695 was 54674, checked in by vboxsync, 10 years ago

CPUM: Working on refactoring the guest CPUID handling.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 79.1 KB
1/* $Id: CPUMAllRegs.cpp 54674 2015-03-06 18:02:31Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52
53/*******************************************************************************
54* Defined Constants And Macros *
55*******************************************************************************/
56/**
57 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
58 *
59 * @returns Pointer to the Virtual CPU.
60 * @param a_pGuestCtx Pointer to the guest context.
61 */
62#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
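/* Note: RT_FROM_MEMBER is IPRT's container-of style helper: it subtracts the
   offset of the member from the member pointer, so the expansion is roughly
   (PVMCPU)((uintptr_t)(a_pGuestCtx) - RT_OFFSETOF(VMCPU, cpum.s.Guest)). */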
63
64/**
65 * Lazily loads the hidden parts of a selector register when using raw-mode.
66 */
67#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
68# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
69 do \
70 { \
71 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
72 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
73 } while (0)
74#else
75# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
76 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
77#endif
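/* In raw-mode capable builds outside ring-0 the macro above performs the actual
   lazy load; in all other configurations the hidden parts are expected to be
   valid already and the macro merely asserts that. */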
78
79
80
81#ifdef VBOX_WITH_RAW_MODE_NOT_R0
82
83/**
84 * Does the lazy hidden selector register loading.
85 *
86 * @param pVCpu The current Virtual CPU.
87 * @param pSReg The selector register to lazily load hidden parts of.
88 */
89static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
90{
91 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
92 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
93 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
94
95 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
96 {
97 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
98 pSReg->Attr.u = 0;
99 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
100 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
101 pSReg->Attr.n.u2Dpl = 3;
102 pSReg->Attr.n.u1Present = 1;
103 pSReg->u32Limit = 0x0000ffff;
104 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
105 pSReg->ValidSel = pSReg->Sel;
106 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
107 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
108 }
109 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
110 {
111 /* Real mode - leave the limit and flags alone here, at least for now. */
112 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
113 pSReg->ValidSel = pSReg->Sel;
114 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
115 }
116 else
117 {
118 /* Protected mode - get it from the selector descriptor tables. */
119 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
120 {
121 Assert(!CPUMIsGuestInLongMode(pVCpu));
122 pSReg->Sel = 0;
123 pSReg->u64Base = 0;
124 pSReg->u32Limit = 0;
125 pSReg->Attr.u = 0;
126 pSReg->ValidSel = 0;
127 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
128 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
129 }
130 else
131 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
132 }
133}
134
135
136/**
137 * Makes sure the hidden CS and SS selector registers are valid, loading them if
138 * necessary.
139 *
140 * @param pVCpu The current virtual CPU.
141 */
142VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
143{
144 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
145 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
146}
147
148
149/**
150 * Loads the hidden parts of a selector register.
151 *
152 * @param pVCpu The current virtual CPU.
153 */
154VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
155{
156 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
157}
158
159#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
160
161
162/**
163 * Obsolete.
164 *
165 * We don't support nested hypervisor context interrupts or traps. Life is much
166 * simpler when we don't. It's also slightly faster at times.
167 *
168 * @param pVCpu Pointer to the VMCPU.
169 */
170VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
171{
172 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
173}
174
175
176/**
177 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
178 *
179 * @param pVCpu Pointer to the VMCPU.
180 */
181VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
182{
183 return &pVCpu->cpum.s.Hyper;
184}
185
186
187VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
188{
189 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
190 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
191}
192
193
194VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
195{
196 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
197 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
198}
199
200
201VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
202{
203 pVCpu->cpum.s.Hyper.cr3 = cr3;
204
205#ifdef IN_RC
206 /* Update the current CR3. */
207 ASMSetCR3(cr3);
208#endif
209}
210
211VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
212{
213 return pVCpu->cpum.s.Hyper.cr3;
214}
215
216
217VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
218{
219 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
220}
221
222
223VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
224{
225 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
226}
227
228
229VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
230{
231 pVCpu->cpum.s.Hyper.es.Sel = SelES;
232}
233
234
235VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
236{
237 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
238}
239
240
241VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
242{
243 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
244}
245
246
247VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
248{
249 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
250}
251
252
253VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
254{
255 pVCpu->cpum.s.Hyper.esp = u32ESP;
256}
257
258
259VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
260{
261 pVCpu->cpum.s.Hyper.edx = u32EDX;
262}
263
264
265VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
266{
267 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
268 return VINF_SUCCESS;
269}
270
271
272VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
273{
274 pVCpu->cpum.s.Hyper.eip = u32EIP;
275}
276
277
278/**
279 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
280 * EFLAGS and EIP prior to resuming guest execution.
281 *
282 * All general registers not given as a parameter will be set to 0. The EFLAGS
283 * register will be set to sane values for C/C++ code execution with interrupts
284 * disabled and IOPL 0.
285 *
286 * @param pVCpu The current virtual CPU.
287 * @param u32EIP The EIP value.
288 * @param u32ESP The ESP value.
289 * @param u32EAX The EAX value.
290 * @param u32EDX The EDX value.
291 */
292VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
293{
294 pVCpu->cpum.s.Hyper.eip = u32EIP;
295 pVCpu->cpum.s.Hyper.esp = u32ESP;
296 pVCpu->cpum.s.Hyper.eax = u32EAX;
297 pVCpu->cpum.s.Hyper.edx = u32EDX;
298 pVCpu->cpum.s.Hyper.ecx = 0;
299 pVCpu->cpum.s.Hyper.ebx = 0;
300 pVCpu->cpum.s.Hyper.ebp = 0;
301 pVCpu->cpum.s.Hyper.esi = 0;
302 pVCpu->cpum.s.Hyper.edi = 0;
303 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
304}
305
306
307VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
308{
309 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
310}
311
312
313VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
314{
315 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
316}
317
318
319/** @MAYBE_LOAD_DRx
320 * Macro for updating DRx values in raw-mode and ring-0 contexts.
321 */
322#ifdef IN_RING0
323# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
324# ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
325# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
326 do { \
327 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
328 a_fnLoad(a_uValue); \
329 else \
330 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
331 } while (0)
332# else
333# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
334 do { \
335 /** @todo we're not loading the correct guest value here! */ \
336 a_fnLoad(a_uValue); \
337 } while (0)
338# endif
339# else
340# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
341 do { \
342 a_fnLoad(a_uValue); \
343 } while (0)
344# endif
345
346#elif defined(IN_RC)
347# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
348 do { \
349 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
350 { a_fnLoad(a_uValue); } \
351 } while (0)
352
353#else
354# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
355#endif
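/* Summary of the MAYBE_LOAD_DRx variants: in ring-0 the value is normally
   loaded straight into the CPU register, except on 32-bit hosts running
   64-bit guests where the load is deferred via CPUM_SYNC_DEBUG_REGS_HYPER;
   in raw-mode context it is only loaded when the hypervisor debug registers
   are already active (CPUM_USED_DEBUG_REGS_HYPER); in ring-3 it is a no-op. */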
356
357VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
358{
359 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
360 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
361}
362
363
364VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
365{
366 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
367 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
368}
369
370
371VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
372{
373 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
374 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
375}
376
377
378VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
379{
380 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
381 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
382}
383
384
385VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
386{
387 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
388}
389
390
391VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
392{
393 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
394#ifdef IN_RC
395 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
396#endif
397}
398
399
400VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
401{
402 return pVCpu->cpum.s.Hyper.cs.Sel;
403}
404
405
406VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
407{
408 return pVCpu->cpum.s.Hyper.ds.Sel;
409}
410
411
412VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
413{
414 return pVCpu->cpum.s.Hyper.es.Sel;
415}
416
417
418VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
419{
420 return pVCpu->cpum.s.Hyper.fs.Sel;
421}
422
423
424VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
425{
426 return pVCpu->cpum.s.Hyper.gs.Sel;
427}
428
429
430VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
431{
432 return pVCpu->cpum.s.Hyper.ss.Sel;
433}
434
435
436VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
437{
438 return pVCpu->cpum.s.Hyper.eax;
439}
440
441
442VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
443{
444 return pVCpu->cpum.s.Hyper.ebx;
445}
446
447
448VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
449{
450 return pVCpu->cpum.s.Hyper.ecx;
451}
452
453
454VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
455{
456 return pVCpu->cpum.s.Hyper.edx;
457}
458
459
460VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
461{
462 return pVCpu->cpum.s.Hyper.esi;
463}
464
465
466VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
467{
468 return pVCpu->cpum.s.Hyper.edi;
469}
470
471
472VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
473{
474 return pVCpu->cpum.s.Hyper.ebp;
475}
476
477
478VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
479{
480 return pVCpu->cpum.s.Hyper.esp;
481}
482
483
484VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
485{
486 return pVCpu->cpum.s.Hyper.eflags.u32;
487}
488
489
490VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
491{
492 return pVCpu->cpum.s.Hyper.eip;
493}
494
495
496VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
497{
498 return pVCpu->cpum.s.Hyper.rip;
499}
500
501
502VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
503{
504 if (pcbLimit)
505 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
506 return pVCpu->cpum.s.Hyper.idtr.pIdt;
507}
508
509
510VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
511{
512 if (pcbLimit)
513 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
514 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
515}
516
517
518VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
519{
520 return pVCpu->cpum.s.Hyper.ldtr.Sel;
521}
522
523
524VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
525{
526 return pVCpu->cpum.s.Hyper.dr[0];
527}
528
529
530VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
531{
532 return pVCpu->cpum.s.Hyper.dr[1];
533}
534
535
536VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
537{
538 return pVCpu->cpum.s.Hyper.dr[2];
539}
540
541
542VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
543{
544 return pVCpu->cpum.s.Hyper.dr[3];
545}
546
547
548VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
549{
550 return pVCpu->cpum.s.Hyper.dr[6];
551}
552
553
554VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
555{
556 return pVCpu->cpum.s.Hyper.dr[7];
557}
558
559
560/**
561 * Gets the pointer to the internal CPUMCTXCORE structure.
562 * This is only for reading in order to save a few calls.
563 *
564 * @param pVCpu Handle to the virtual cpu.
565 */
566VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
567{
568 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
569}
570
571
572/**
573 * Queries the pointer to the internal CPUMCTX structure.
574 *
575 * @returns The CPUMCTX pointer.
576 * @param pVCpu Handle to the virtual cpu.
577 */
578VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
579{
580 return &pVCpu->cpum.s.Guest;
581}
582
583VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
584{
585#ifdef VBOX_WITH_IEM
586# ifdef VBOX_WITH_RAW_MODE_NOT_R0
587 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
588 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
589# endif
590#endif
591 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
592 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
593 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
594 return VINF_SUCCESS; /* formality, consider it void. */
595}
596
597VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
598{
599#ifdef VBOX_WITH_IEM
600# ifdef VBOX_WITH_RAW_MODE_NOT_R0
601 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
602 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
603# endif
604#endif
605 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
606 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
607 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
608 return VINF_SUCCESS; /* formality, consider it void. */
609}
610
611VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
612{
613#ifdef VBOX_WITH_IEM
614# ifdef VBOX_WITH_RAW_MODE_NOT_R0
615 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
616 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
617# endif
618#endif
619 pVCpu->cpum.s.Guest.tr.Sel = tr;
620 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
621 return VINF_SUCCESS; /* formality, consider it void. */
622}
623
624VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
625{
626#ifdef VBOX_WITH_IEM
627# ifdef VBOX_WITH_RAW_MODE_NOT_R0
628 if ( ( ldtr != 0
629 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
630 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
631 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
632# endif
633#endif
634 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
635 /* The caller will set more hidden bits if it has them. */
636 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
637 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
638 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
639 return VINF_SUCCESS; /* formality, consider it void. */
640}
641
642
643/**
644 * Set the guest CR0.
645 *
646 * When called in GC, the hyper CR0 may be updated if that is
647 * required. The caller only has to take special action if AM,
648 * WP, PG or PE changes.
649 *
650 * @returns VINF_SUCCESS (consider it void).
651 * @param pVCpu Handle to the virtual cpu.
652 * @param cr0 The new CR0 value.
653 */
654VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
655{
656#ifdef IN_RC
657 /*
658 * Check if we need to change the hypervisor CR0 because of
659 * FPU related bits (TS, EM, MP).
660 */
661 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
662 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
663 {
664 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
665 {
666 /*
667 * We haven't saved the host FPU state yet, so TS and MP are both set
668 * and EM should be reflecting the guest EM (it always does this).
669 */
670 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
671 {
672 uint32_t HyperCR0 = ASMGetCR0();
673 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
674 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
675 HyperCR0 &= ~X86_CR0_EM;
676 HyperCR0 |= cr0 & X86_CR0_EM;
677 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
678 ASMSetCR0(HyperCR0);
679 }
680# ifdef VBOX_STRICT
681 else
682 {
683 uint32_t HyperCR0 = ASMGetCR0();
684 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
685 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
686 }
687# endif
688 }
689 else
690 {
691 /*
692 * Already saved the state, so we're just mirroring
693 * the guest flags.
694 */
695 uint32_t HyperCR0 = ASMGetCR0();
696 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
697 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
698 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
699 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
700 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
701 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
702 ASMSetCR0(HyperCR0);
703 }
704 }
705#endif /* IN_RC */
706
707 /*
708 * Check for changes causing TLB flushes (for REM).
709 * The caller is responsible for calling PGM when appropriate.
710 */
711 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
712 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
713 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
714 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
715
716 /*
717 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
718 */
719 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
720 PGMCr0WpEnabled(pVCpu);
721
722 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
723 return VINF_SUCCESS;
724}
725
726
727VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
728{
729 pVCpu->cpum.s.Guest.cr2 = cr2;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
735{
736 pVCpu->cpum.s.Guest.cr3 = cr3;
737 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
738 return VINF_SUCCESS;
739}
740
741
742VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
743{
744 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
745 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
746 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
747 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
748 if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
749 cr4 &= ~X86_CR4_OSFSXR;
750 pVCpu->cpum.s.Guest.cr4 = cr4;
751 return VINF_SUCCESS;
752}
753
754
755VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
756{
757 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
758 return VINF_SUCCESS;
759}
760
761
762VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
763{
764 pVCpu->cpum.s.Guest.eip = eip;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
770{
771 pVCpu->cpum.s.Guest.eax = eax;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
777{
778 pVCpu->cpum.s.Guest.ebx = ebx;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
784{
785 pVCpu->cpum.s.Guest.ecx = ecx;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
791{
792 pVCpu->cpum.s.Guest.edx = edx;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
798{
799 pVCpu->cpum.s.Guest.esp = esp;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
805{
806 pVCpu->cpum.s.Guest.ebp = ebp;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
812{
813 pVCpu->cpum.s.Guest.esi = esi;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
819{
820 pVCpu->cpum.s.Guest.edi = edi;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
826{
827 pVCpu->cpum.s.Guest.ss.Sel = ss;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
833{
834 pVCpu->cpum.s.Guest.cs.Sel = cs;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
840{
841 pVCpu->cpum.s.Guest.ds.Sel = ds;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
847{
848 pVCpu->cpum.s.Guest.es.Sel = es;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
854{
855 pVCpu->cpum.s.Guest.fs.Sel = fs;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
861{
862 pVCpu->cpum.s.Guest.gs.Sel = gs;
863 return VINF_SUCCESS;
864}
865
866
867VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
868{
869 pVCpu->cpum.s.Guest.msrEFER = val;
870}
871
872
873VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
874{
875 if (pcbLimit)
876 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
877 return pVCpu->cpum.s.Guest.idtr.pIdt;
878}
879
880
881VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
882{
883 if (pHidden)
884 *pHidden = pVCpu->cpum.s.Guest.tr;
885 return pVCpu->cpum.s.Guest.tr.Sel;
886}
887
888
889VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
890{
891 return pVCpu->cpum.s.Guest.cs.Sel;
892}
893
894
895VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
896{
897 return pVCpu->cpum.s.Guest.ds.Sel;
898}
899
900
901VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
902{
903 return pVCpu->cpum.s.Guest.es.Sel;
904}
905
906
907VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
908{
909 return pVCpu->cpum.s.Guest.fs.Sel;
910}
911
912
913VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
914{
915 return pVCpu->cpum.s.Guest.gs.Sel;
916}
917
918
919VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
920{
921 return pVCpu->cpum.s.Guest.ss.Sel;
922}
923
924
925VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
926{
927 return pVCpu->cpum.s.Guest.ldtr.Sel;
928}
929
930
931VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
932{
933 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
934 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
935 return pVCpu->cpum.s.Guest.ldtr.Sel;
936}
937
938
939VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
940{
941 return pVCpu->cpum.s.Guest.cr0;
942}
943
944
945VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
946{
947 return pVCpu->cpum.s.Guest.cr2;
948}
949
950
951VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
952{
953 return pVCpu->cpum.s.Guest.cr3;
954}
955
956
957VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
958{
959 return pVCpu->cpum.s.Guest.cr4;
960}
961
962
963VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
964{
965 uint64_t u64;
966 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
967 if (RT_FAILURE(rc))
968 u64 = 0;
969 return u64;
970}
971
972
973VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
974{
975 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
976}
977
978
979VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
980{
981 return pVCpu->cpum.s.Guest.eip;
982}
983
984
985VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
986{
987 return pVCpu->cpum.s.Guest.rip;
988}
989
990
991VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
992{
993 return pVCpu->cpum.s.Guest.eax;
994}
995
996
997VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
998{
999 return pVCpu->cpum.s.Guest.ebx;
1000}
1001
1002
1003VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1004{
1005 return pVCpu->cpum.s.Guest.ecx;
1006}
1007
1008
1009VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1010{
1011 return pVCpu->cpum.s.Guest.edx;
1012}
1013
1014
1015VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1016{
1017 return pVCpu->cpum.s.Guest.esi;
1018}
1019
1020
1021VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1022{
1023 return pVCpu->cpum.s.Guest.edi;
1024}
1025
1026
1027VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1028{
1029 return pVCpu->cpum.s.Guest.esp;
1030}
1031
1032
1033VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1034{
1035 return pVCpu->cpum.s.Guest.ebp;
1036}
1037
1038
1039VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1040{
1041 return pVCpu->cpum.s.Guest.eflags.u32;
1042}
1043
1044
1045VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1046{
1047 switch (iReg)
1048 {
1049 case DISCREG_CR0:
1050 *pValue = pVCpu->cpum.s.Guest.cr0;
1051 break;
1052
1053 case DISCREG_CR2:
1054 *pValue = pVCpu->cpum.s.Guest.cr2;
1055 break;
1056
1057 case DISCREG_CR3:
1058 *pValue = pVCpu->cpum.s.Guest.cr3;
1059 break;
1060
1061 case DISCREG_CR4:
1062 *pValue = pVCpu->cpum.s.Guest.cr4;
1063 break;
1064
1065 case DISCREG_CR8:
1066 {
1067 uint8_t u8Tpr;
1068 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1069 if (RT_FAILURE(rc))
1070 {
1071 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1072 *pValue = 0;
1073 return rc;
1074 }
1075 *pValue = u8Tpr >> 4; /* Bits 7-4 of the TPR map to CR8; bits 3-0 are not represented in CR8. */
1076 break;
1077 }
1078
1079 default:
1080 return VERR_INVALID_PARAMETER;
1081 }
1082 return VINF_SUCCESS;
1083}
1084
1085
1086VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1087{
1088 return pVCpu->cpum.s.Guest.dr[0];
1089}
1090
1091
1092VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1093{
1094 return pVCpu->cpum.s.Guest.dr[1];
1095}
1096
1097
1098VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1099{
1100 return pVCpu->cpum.s.Guest.dr[2];
1101}
1102
1103
1104VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1105{
1106 return pVCpu->cpum.s.Guest.dr[3];
1107}
1108
1109
1110VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1111{
1112 return pVCpu->cpum.s.Guest.dr[6];
1113}
1114
1115
1116VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1117{
1118 return pVCpu->cpum.s.Guest.dr[7];
1119}
1120
1121
1122VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1123{
1124 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1125 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1126 if (iReg == 4 || iReg == 5)
1127 iReg += 2;
1128 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1129 return VINF_SUCCESS;
1130}
1131
1132
1133VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1134{
1135 return pVCpu->cpum.s.Guest.msrEFER;
1136}
1137
1138
1139/**
1140 * Looks up a CPUID leaf in the CPUID leaf array.
1141 *
1142 * @returns Pointer to the leaf if found, NULL if not.
1143 *
1144 * @param pVM Pointer to the cross context VM structure.
1145 * @param uLeaf The leaf to get.
1146 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1147 * isn't.
1148 */
1149PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf)
1150{
1151 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1152 if (iEnd)
1153 {
1154 unsigned iStart = 0;
1155 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
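        /* Binary search on the leaf number; the array is sorted by uLeaf and,
           within a leaf, by uSubLeaf. The sub-leaf is matched up separately
           once the leaf has been located. */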
1156 for (;;)
1157 {
1158 unsigned i = iStart + (iEnd - iStart) / 2U;
1159 if (uLeaf < paLeaves[i].uLeaf)
1160 {
1161 if (i <= iStart)
1162 return NULL;
1163 iEnd = i;
1164 }
1165 else if (uLeaf > paLeaves[i].uLeaf)
1166 {
1167 i += 1;
1168 if (i >= iEnd)
1169 return NULL;
1170 iStart = i;
1171 }
1172 else
1173 {
1174 uSubLeaf &= paLeaves[i].fSubLeafMask;
1175 if (uSubLeaf != paLeaves[i].uSubLeaf)
1176 {
1177 /* Find the right subleaf. We return the last one before
1178 uSubLeaf if we don't find an exact match. */
1179 if (uSubLeaf < paLeaves[i].uSubLeaf)
1180 while ( i > 0
1181 && uLeaf == paLeaves[i].uLeaf
1182 && uSubLeaf < paLeaves[i].uSubLeaf)
1183 i--;
1184 else
1185 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1186 && uLeaf == paLeaves[i + 1].uLeaf
1187 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1188 i++;
1189 }
1190 return &paLeaves[i];
1191 }
1192 }
1193 }
1194
1195 return NULL;
1196}
1197
1198
1199/**
1200 * Gets a CPUID leaf.
1201 *
1202 * @param pVCpu Pointer to the VMCPU.
1203 * @param iLeaf The CPUID leaf to get.
1204 * @param pEax Where to store the EAX value.
1205 * @param pEbx Where to store the EBX value.
1206 * @param pEcx Where to store the ECX value.
1207 * @param pEdx Where to store the EDX value.
1208 */
1209VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1210{
1211 PVM pVM = pVCpu->CTX_SUFF(pVM);
1212
1213 PCCPUMCPUID pCpuId;
1214 if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmStd))
1215 pCpuId = &pVM->cpum.s.aGuestCpuIdPatmStd[iLeaf];
1216 else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmExt))
1217 pCpuId = &pVM->cpum.s.aGuestCpuIdPatmExt[iLeaf - UINT32_C(0x80000000)];
1218 else if ( iLeaf - UINT32_C(0x40000000) < 0x100 /** @todo Fix this later: Hyper-V says 0x400000FF is the last valid leaf. */
1219 && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdPatmStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP)) /* Only report if HVP bit set. */
1220 {
1221 PCPUMCPUIDLEAF pHyperLeaf = cpumCpuIdGetLeaf(pVM, iLeaf, 0 /* uSubLeaf */);
1222 if (RT_LIKELY(pHyperLeaf))
1223 {
1224 *pEax = pHyperLeaf->uEax;
1225 *pEbx = pHyperLeaf->uEbx;
1226 *pEcx = pHyperLeaf->uEcx;
1227 *pEdx = pHyperLeaf->uEdx;
1228 }
1229 else
1230 {
1231 *pEax = *pEbx = *pEcx = *pEdx = 0;
1232 }
1233 return;
1234 }
1235 else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmCentaur))
1236 pCpuId = &pVM->cpum.s.aGuestCpuIdPatmCentaur[iLeaf - UINT32_C(0xc0000000)];
1237 else
1238 pCpuId = &pVM->cpum.s.GuestCpuIdPatmDef;
1239
1240 uint32_t cCurrentCacheIndex = *pEcx;
1241
1242 *pEax = pCpuId->eax;
1243 *pEbx = pCpuId->ebx;
1244 *pEcx = pCpuId->ecx;
1245 *pEdx = pCpuId->edx;
1246
1247 if ( iLeaf == 1)
1248 {
1249 /* Bits 31-24: Initial APIC ID */
1250 Assert(pVCpu->idCpu <= 255);
1251 *pEbx |= (pVCpu->idCpu << 24);
1252 }
1253
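    /* CPUID leaf 4 (deterministic cache parameters, Intel): synthesize a simple
       L1D/L1I/L2 hierarchy. Intel's encoding: EAX[31:26] = cores - 1,
       EAX[25:14] = logical CPUs sharing the cache - 1, EAX[7:5] = level,
       EAX[4:0] = cache type; EBX[31:22] = ways - 1, EBX[21:12] = partitions - 1,
       EBX[11:0] = line size - 1; ECX = sets - 1. */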
1254 if ( iLeaf == 4
1255 && cCurrentCacheIndex < 3
1256 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1257 {
1258 uint32_t type, level, sharing, linesize,
1259 partitions, associativity, sets, cores;
1260
1261 /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
1262 partitions = 1;
1263 /* These initializers only silence the compiler; the values are always
1264 overwritten below. */
1265 sets = associativity = sharing = level = 1;
1266 cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
1267 switch (cCurrentCacheIndex)
1268 {
1269 case 0:
1270 type = 1;
1271 level = 1;
1272 sharing = 1;
1273 linesize = 64;
1274 associativity = 8;
1275 sets = 64;
1276 break;
1277 case 1:
1278 level = 1;
1279 type = 2;
1280 sharing = 1;
1281 linesize = 64;
1282 associativity = 8;
1283 sets = 64;
1284 break;
1285 default: /* Silence gcc; deliberate fall-through to case 2. */
1286 AssertFailed();
1287 case 2:
1288 level = 2;
1289 type = 3;
1290 sharing = cores; /* our L2 cache is modelled as shared between all cores */
1291 linesize = 64;
1292 associativity = 24;
1293 sets = 4096;
1294 break;
1295 }
1296
1297 NOREF(type);
1298 *pEax |= ((cores - 1) << 26) |
1299 ((sharing - 1) << 14) |
1300 (level << 5) |
1301 1;
1302 *pEbx = (linesize - 1) |
1303 ((partitions - 1) << 12) |
1304 ((associativity - 1) << 22); /* -1 encoding */
1305 *pEcx = sets - 1;
1306 }
1307
1308 Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1309}
1310
1311
1312/**
1313 * Sets a CPUID feature bit.
1314 *
1315 * @param pVM Pointer to the VM.
1316 * @param enmFeature The feature to set.
1317 */
1318VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1319{
1320 PCPUMCPUIDLEAF pLeaf;
1321
1322 switch (enmFeature)
1323 {
1324 /*
1325 * Set the APIC bit in both feature masks.
1326 */
1327 case CPUMCPUIDFEATURE_APIC:
1328 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1329 if (pLeaf)
1330 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
1331
1332 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1333 if ( pLeaf
1334 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1335 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1336
1337 pVM->cpum.s.GuestFeatures.fApic = 1;
1338 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n"));
1339 break;
1340
1341 /*
1342 * Set the x2APIC bit in the standard feature mask.
1343 */
1344 case CPUMCPUIDFEATURE_X2APIC:
1345 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1346 if (pLeaf)
1347 pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
1348 pVM->cpum.s.GuestFeatures.fX2Apic = 1;
1349 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
1350 break;
1351
1352 /*
1353 * Set the sysenter/sysexit bit in the standard feature mask.
1354 * Assumes the caller knows what it's doing! (host must support these)
1355 */
1356 case CPUMCPUIDFEATURE_SEP:
1357 if (!pVM->cpum.s.HostFeatures.fSysEnter)
1358 {
1359 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1360 return;
1361 }
1362
1363 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1364 if (pLeaf)
1365 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
1366 pVM->cpum.s.GuestFeatures.fSysEnter = 1;
1367 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
1368 break;
1369
1370 /*
1371 * Set the syscall/sysret bit in the extended feature mask.
1372 * Assumes the caller knows what it's doing! (host must support these)
1373 */
1374 case CPUMCPUIDFEATURE_SYSCALL:
1375 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1376 if ( !pLeaf
1377 || !pVM->cpum.s.HostFeatures.fSysCall)
1378 {
1379#if HC_ARCH_BITS == 32
1380 /* Intel does not appear to set X86_CPUID_EXT_FEATURE_EDX_SYSCALL in
1381 32-bit mode, even when the CPU is capable of it in 64-bit mode.
1382 Long mode requires syscall support. */
1383 if (!pVM->cpum.s.HostFeatures.fLongMode)
1384#endif
1385 {
1386 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
1387 return;
1388 }
1389 }
1390
1391 /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
1392 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
1393 pVM->cpum.s.GuestFeatures.fSysCall = 1;
1394 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
1395 break;
1396
1397 /*
1398 * Set the PAE bit in both feature masks.
1399 * Assumes the caller knows what it's doing! (host must support these)
1400 */
1401 case CPUMCPUIDFEATURE_PAE:
1402 if (!pVM->cpum.s.HostFeatures.fPae)
1403 {
1404 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
1405 return;
1406 }
1407
1408 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1409 if (pLeaf)
1410 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
1411
1412 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1413 if ( pLeaf
1414 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1415 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1416
1417 pVM->cpum.s.GuestFeatures.fPae = 1;
1418 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
1419 break;
1420
1421 /*
1422 * Set the LONG MODE bit in the extended feature mask.
1423 * Assumes the caller knows what it's doing! (host must support these)
1424 */
1425 case CPUMCPUIDFEATURE_LONG_MODE:
1426 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1427 if ( !pLeaf
1428 || !pVM->cpum.s.HostFeatures.fLongMode)
1429 {
1430 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
1431 return;
1432 }
1433
1434 /* Valid for both Intel and AMD. */
1435 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1436 pVM->cpum.s.GuestFeatures.fLongMode = 1;
1437 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
1438 break;
1439
1440 /*
1441 * Set the NX/XD bit in the extended feature mask.
1442 * Assumes the caller knows what it's doing! (host must support these)
1443 */
1444 case CPUMCPUIDFEATURE_NX:
1445 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1446 if ( !pLeaf
1447 || !pVM->cpum.s.HostFeatures.fNoExecute)
1448 {
1449 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
1450 return;
1451 }
1452
1453 /* Valid for both Intel and AMD. */
1454 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
1455 pVM->cpum.s.GuestFeatures.fNoExecute = 1;
1456 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
1457 break;
1458
1459
1460 /*
1461 * Set the LAHF/SAHF support in 64-bit mode.
1462 * Assumes the caller knows what it's doing! (host must support this)
1463 */
1464 case CPUMCPUIDFEATURE_LAHF:
1465 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1466 if ( !pLeaf
1467 || !pVM->cpum.s.HostFeatures.fLahfSahf)
1468 {
1469 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
1470 return;
1471 }
1472
1473 /* Valid for both Intel and AMD. */
1474 pVM->cpum.s.aGuestCpuIdPatmExt[1].ecx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1475 pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
1476 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1477 break;
1478
1479 /*
1480 * Set the page attribute table bit. This is an alternative page-level
1481 * cache control that doesn't matter much when everything is
1482 * virtualized, though it may when passing through device memory.
1483 */
1484 case CPUMCPUIDFEATURE_PAT:
1485 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1486 if (pLeaf)
1487 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
1488
1489 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1490 if ( pLeaf
1491 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1492 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1493
1494 pVM->cpum.s.GuestFeatures.fPat = 1;
1495 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
1496 break;
1497
1498 /*
1499 * Set the RDTSCP support bit.
1500 * Assumes the caller knows what it's doing! (host must support this)
1501 */
1502 case CPUMCPUIDFEATURE_RDTSCP:
1503 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1504 if ( !pLeaf
1505 || !pVM->cpum.s.HostFeatures.fRdTscP
1506 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1507 {
1508 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1509 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
1510 return;
1511 }
1512
1513 /* Valid for both Intel and AMD. */
1514 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1515 pVM->cpum.s.GuestFeatures.fRdTscP = 1;
1516 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1517 break;
1518
1519 /*
1520 * Set the Hypervisor Present bit in the standard feature mask.
1521 */
1522 case CPUMCPUIDFEATURE_HVP:
1523 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1524 if (pLeaf)
1525 pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
1526 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
1527 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
1528 break;
1529
1530 /*
1531 * Set the MWAIT Extensions Present bit in the MWAIT/MONITOR leaf.
1532 * This currently includes the Present bit and MWAITBREAK bit as well.
1533 */
1534 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1535 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005), 0);
1536 if ( !pLeaf
1537 || !pVM->cpum.s.HostFeatures.fMWaitExtensions)
1538 {
1539 LogRel(("CPUM: WARNING! Can't turn on MWAIT Extensions when the host doesn't support it!\n"));
1540 return;
1541 }
1542
1543 /* Valid for both Intel and AMD. */
1544 pVM->cpum.s.aGuestCpuIdPatmStd[5].ecx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
1545 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 1;
1546 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled MWAIT Extensions.\n"));
1547 break;
1548
1549 default:
1550 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1551 break;
1552 }
1553
1554 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1555 {
1556 PVMCPU pVCpu = &pVM->aCpus[i];
1557 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1558 }
1559}
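/* Ring-3 usage sketch (illustrative only; the configuration flag name below is
   hypothetical):

       CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
       if (fGuestSupportsNx)   // hypothetical caller flag
           CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);

   Each call patches the relevant CPUID leaf (when present), updates the
   corresponding GuestFeatures flag and marks CPUM_CHANGED_CPUID on every
   VCPU so consumers re-query the CPUID state. */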
1560
1561
1562/**
1563 * Queries a CPUID feature bit.
1564 *
1565 * @returns boolean for feature presence
1566 * @param pVM Pointer to the VM.
1567 * @param enmFeature The feature to query.
1568 */
1569VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1570{
1571 switch (enmFeature)
1572 {
1573 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
1574 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
1575 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
1576 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
1577 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
1578 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
1579 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
1580 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
1581 case CPUMCPUIDFEATURE_PAT: return pVM->cpum.s.GuestFeatures.fPat;
1582 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
1583 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
1584 case CPUMCPUIDFEATURE_MWAIT_EXTS: return pVM->cpum.s.GuestFeatures.fMWaitExtensions;
1585
1586 case CPUMCPUIDFEATURE_INVALID:
1587 case CPUMCPUIDFEATURE_32BIT_HACK:
1588 break;
1589 }
1590 AssertFailed();
1591 return false;
1592}
1593
1594
1595/**
1596 * Clears a CPUID feature bit.
1597 *
1598 * @param pVM Pointer to the VM.
1599 * @param enmFeature The feature to clear.
1600 */
1601VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1602{
1603 PCPUMCPUIDLEAF pLeaf;
1604 switch (enmFeature)
1605 {
1606 case CPUMCPUIDFEATURE_APIC:
1607 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1608 if (pLeaf)
1609 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1610
1611 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1612 if ( pLeaf
1613 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1614 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1615
1616 pVM->cpum.s.GuestFeatures.fApic = 0;
1617 Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
1618 break;
1619
1620 case CPUMCPUIDFEATURE_X2APIC:
1621 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1622 if (pLeaf)
1623 pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1624 pVM->cpum.s.GuestFeatures.fX2Apic = 0;
1625 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
1626 break;
1627
1628 case CPUMCPUIDFEATURE_PAE:
1629 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1630 if (pLeaf)
1631 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
1632
1633 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1634 if ( pLeaf
1635 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1636 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1637
1638 pVM->cpum.s.GuestFeatures.fPae = 0;
1639 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
1640 break;
1641
1642 case CPUMCPUIDFEATURE_PAT:
1643 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1644 if (pLeaf)
1645 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
1646
1647 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1648 if ( pLeaf
1649 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1650 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1651
1652 pVM->cpum.s.GuestFeatures.fPat = 0;
1653 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
1654 break;
1655
1656 case CPUMCPUIDFEATURE_LONG_MODE:
1657 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1658 if (pLeaf)
1659 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1660 pVM->cpum.s.GuestFeatures.fLongMode = 0;
1661 break;
1662
1663 case CPUMCPUIDFEATURE_LAHF:
1664 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1665 if (pLeaf)
1666 pVM->cpum.s.aGuestCpuIdPatmExt[1].ecx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1667 pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
1668 break;
1669
1670 case CPUMCPUIDFEATURE_RDTSCP:
1671 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
1672 if (pLeaf)
1673 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1674 pVM->cpum.s.GuestFeatures.fRdTscP = 0;
1675 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
1676 break;
1677
1678 case CPUMCPUIDFEATURE_HVP:
1679 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
1680 if (pLeaf)
1681 pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
1682 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
1683 break;
1684
1685 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1686 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005), 0);
1687 if (pLeaf)
1688 pVM->cpum.s.aGuestCpuIdPatmStd[5].ecx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
1689 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 0;
1690 Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
1691 break;
1692
1693 default:
1694 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1695 break;
1696 }
1697
1698 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1699 {
1700 PVMCPU pVCpu = &pVM->aCpus[i];
1701 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1702 }
1703}
1704
1705
1706/**
1707 * Gets the host CPU vendor.
1708 *
1709 * @returns CPU vendor.
1710 * @param pVM Pointer to the VM.
1711 */
1712VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1713{
1714 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1715}
1716
1717
1718/**
1719 * Gets the CPU vendor.
1720 *
1721 * @returns CPU vendor.
1722 * @param pVM Pointer to the VM.
1723 */
1724VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1725{
1726 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1727}
1728
1729
1730VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1731{
1732 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1733 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1734}
1735
1736
1737VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1738{
1739 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1740 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1741}
1742
1743
1744VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1745{
1746 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1747 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1748}
1749
1750
1751VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1752{
1753 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1754 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1755}
1756
1757
1758VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1759{
1760 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1761 return VINF_SUCCESS; /* No need to recalc. */
1762}
1763
1764
1765VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1766{
1767 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1768 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1769}
1770
1771
1772VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1773{
1774 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1775 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1776 if (iReg == 4 || iReg == 5)
1777 iReg += 2;
1778 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1779 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1780}
1781
1782
1783/**
1784 * Recalculates the hypervisor DRx register values based on current guest
1785 * registers and DBGF breakpoints, updating changed registers depending on the
1786 * context.
1787 *
1788 * This is called whenever a guest DRx register is modified (any context) and
1789 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1790 *
1791 * In raw-mode context this function will reload any (hyper) DRx registers that
1792 * come out with a different value. It may also have to save the host debug
1793 * registers if that hasn't been done already. In this context though, we'll
1794 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1795 * are only important when breakpoints are actually enabled.
1796 *
1797 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1798 * reloaded by the HM code if it changes. Furthermore, we will only use the
1799 * combined register set when the VBox debugger is actually using hardware BPs;
1800 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1801 * concern us here).
1802 *
1803 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1804 * all the time.
1805 *
1806 * @returns VINF_SUCCESS.
1807 * @param pVCpu Pointer to the VMCPU.
1808 * @param iGstReg The guest debug register number that was modified.
1809 * UINT8_MAX if not guest register.
1810 * @param fForceHyper Used in HM to force hyper registers because of single
1811 * stepping.
1812 */
1813VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1814{
1815 PVM pVM = pVCpu->CTX_SUFF(pVM);
1816
1817 /*
1818 * Compare the DR7s first.
1819 *
1820 * We only care about the enabled flags. GD is virtualized when we
1821 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1822 * always have the LE and GE bits set, so no need to check and disable
1823 * stuff if they're cleared like we have to for the guest DR7.
1824 */
1825 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1826 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1827 uGstDr7 = 0;
1828 else if (!(uGstDr7 & X86_DR7_LE))
1829 uGstDr7 &= ~X86_DR7_LE_ALL;
1830 else if (!(uGstDr7 & X86_DR7_GE))
1831 uGstDr7 &= ~X86_DR7_GE_ALL;
1832
1833 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1834
1835#ifdef IN_RING0
1836 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1837 fForceHyper = true;
1838#endif
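    /* When HM is enabled (and we are not forcing hyper registers) only the DBGF
       breakpoint settings matter here; otherwise the guest and DBGF DR7 values
       are combined. */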
1839 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
1840 {
1841 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1842#ifdef IN_RC
1843 bool const fHmEnabled = false;
1844#elif defined(IN_RING3)
1845 bool const fHmEnabled = HMIsEnabled(pVM);
1846#endif
1847
1848 /*
1849 * Ok, something is enabled. Recalc each of the breakpoints, taking
1850 * the VM debugger ones over the guest ones. In raw-mode context we will
1851 * not allow breakpoints with values inside the hypervisor area.
1852 */
1853 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1854
1855 /* bp 0 */
1856 RTGCUINTREG uNewDr0;
1857 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1858 {
1859 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1860 uNewDr0 = DBGFBpGetDR0(pVM);
1861 }
1862 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1863 {
1864 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1865#ifndef IN_RING0
1866 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1867 uNewDr0 = 0;
1868 else
1869#endif
1870 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1871 }
1872 else
1873 uNewDr0 = 0;
1874
1875 /* bp 1 */
1876 RTGCUINTREG uNewDr1;
1877 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1878 {
1879 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1880 uNewDr1 = DBGFBpGetDR1(pVM);
1881 }
1882 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1883 {
1884 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1885#ifndef IN_RING0
1886 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1887 uNewDr1 = 0;
1888 else
1889#endif
1890 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1891 }
1892 else
1893 uNewDr1 = 0;
1894
1895 /* bp 2 */
1896 RTGCUINTREG uNewDr2;
1897 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1898 {
1899 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1900 uNewDr2 = DBGFBpGetDR2(pVM);
1901 }
1902 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1903 {
1904 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1905#ifndef IN_RING0
1906 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1907 uNewDr2 = 0;
1908 else
1909#endif
1910 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1911 }
1912 else
1913 uNewDr2 = 0;
1914
1915 /* bp 3 */
1916 RTGCUINTREG uNewDr3;
1917 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1918 {
1919 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1920 uNewDr3 = DBGFBpGetDR3(pVM);
1921 }
1922 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1923 {
1924 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1925#ifndef IN_RING0
1926 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1927 uNewDr3 = 0;
1928 else
1929#endif
1930 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1931 }
1932 else
1933 uNewDr3 = 0;
1934
1935 /*
1936 * Apply the updates.
1937 */
1938#ifdef IN_RC
1939 /* Make sure to save host registers first. */
1940 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1941 {
1942 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1943 {
1944 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1945 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1946 }
1947 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1948 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1949 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1950 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1951 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1952
1953 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1954 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1955 ASMSetDR0(uNewDr0);
1956 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
1957 ASMSetDR1(uNewDr1);
1958 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
1959 ASMSetDR2(uNewDr2);
1960 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
1961 ASMSetDR3(uNewDr3);
1962 ASMSetDR6(X86_DR6_INIT_VAL);
1963 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
1964 ASMSetDR7(uNewDr7);
1965 }
1966 else
1967#endif
1968 {
1969 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1970 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1971 CPUMSetHyperDR3(pVCpu, uNewDr3);
1972 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1973 CPUMSetHyperDR2(pVCpu, uNewDr2);
1974 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1975 CPUMSetHyperDR1(pVCpu, uNewDr1);
1976 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1977 CPUMSetHyperDR0(pVCpu, uNewDr0);
1978 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1979 CPUMSetHyperDR7(pVCpu, uNewDr7);
1980 }
1981 }
1982#ifdef IN_RING0
1983 else if (CPUMIsGuestDebugStateActive(pVCpu))
1984 {
1985 /*
1986 * Reload the register that was modified. Normally this won't happen
1987 * as we won't intercept DRx writes when not having the hyper debug
1988 * state loaded, but in case we do for some reason we'll simply deal
1989 * with it.
1990 */
1991 switch (iGstReg)
1992 {
1993 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1994 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1995 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1996 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1997 default:
1998 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1999 }
2000 }
2001#endif
2002 else
2003 {
2004 /*
2005 * No active debug state any more. In raw-mode this means we have to
2006 * make sure DR7 has everything disabled now, if we armed it already.
2007 * In ring-0 we might end up here when just single stepping.
2008 */
2009#if defined(IN_RC) || defined(IN_RING0)
2010 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
2011 {
2012# ifdef IN_RC
2013 ASMSetDR7(X86_DR7_INIT_VAL);
2014# endif
2015 if (pVCpu->cpum.s.Hyper.dr[0])
2016 ASMSetDR0(0);
2017 if (pVCpu->cpum.s.Hyper.dr[1])
2018 ASMSetDR1(0);
2019 if (pVCpu->cpum.s.Hyper.dr[2])
2020 ASMSetDR2(0);
2021 if (pVCpu->cpum.s.Hyper.dr[3])
2022 ASMSetDR3(0);
2023 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
2024 }
2025#endif
2026 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2027
2028 /* Clear all the registers. */
2029 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
2030 pVCpu->cpum.s.Hyper.dr[3] = 0;
2031 pVCpu->cpum.s.Hyper.dr[2] = 0;
2032 pVCpu->cpum.s.Hyper.dr[1] = 0;
2033 pVCpu->cpum.s.Hyper.dr[0] = 0;
2034
2035 }
2036 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2037 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2038 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2039 pVCpu->cpum.s.Hyper.dr[7]));
2040
2041 return VINF_SUCCESS;
2042}
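
/* Usage sketch (illustrative, not part of the original file): the per-breakpoint
   logic above first tests the local/global enable bits in DR7 before merging the
   RW/LEN fields. A hypothetical helper expressing that check for breakpoint 0: */
DECLINLINE(bool) cpumSketchIsDr7Bp0Armed(RTGCUINTREG uDr7)
{
    /* Breakpoint 0 is armed if either its local (L0) or global (G0) enable bit is set. */
    return RT_BOOL(uDr7 & (X86_DR7_L0 | X86_DR7_G0));
}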
2043
2044
2045/**
2046 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2047 *
2048 * @returns true if NXE is enabled, otherwise false.
2049 * @param pVCpu Pointer to the VMCPU.
2050 */
2051VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2052{
2053 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2054}
2055
2056
2057/**
2058 * Tests if the guest has the Page Size Extension enabled (PSE).
2059 *
2060 * @returns true if PSE (or PAE) is enabled, otherwise false.
2061 * @param pVCpu Pointer to the VMCPU.
2062 */
2063VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2064{
2065 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2066 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2067}
2068
2069
2070/**
2071 * Tests if the guest has paging enabled (PG).
2072 *
2073 * @returns true if paging is enabled, otherwise false.
2074 * @param pVCpu Pointer to the VMCPU.
2075 */
2076VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2077{
2078 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2079}
2080
2081
2082/**
2083 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
2084 *
2085 * @returns true if write protection is enabled, otherwise false.
2086 * @param pVCpu Pointer to the VMCPU.
2087 */
2088VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2089{
2090 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2091}
2092
2093
2094/**
2095 * Tests if the guest is running in real mode or not.
2096 *
2097 * @returns true if in real mode, otherwise false.
2098 * @param pVCpu Pointer to the VMCPU.
2099 */
2100VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2101{
2102 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2103}
2104
2105
2106/**
2107 * Tests if the guest is running in real or virtual 8086 mode.
2108 *
2109 * @returns @c true if it is, @c false if not.
2110 * @param pVCpu Pointer to the VMCPU.
2111 */
2112VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2113{
2114 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2115 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2116}
2117
2118
2119/**
2120 * Tests if the guest is running in protected mode or not.
2121 *
2122 * @returns true if in protected mode, otherwise false.
2123 * @param pVCpu Pointer to the VMCPU.
2124 */
2125VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2126{
2127 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2128}
2129
2130
2131/**
2132 * Tests if the guest is running in paged protected mode or not.
2133 *
2134 * @returns true if in paged protected mode, otherwise false.
2135 * @param pVCpu Pointer to the VMCPU.
2136 */
2137VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2138{
2139 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2140}
2141
2142
2143/**
2144 * Tests if the guest is running in long mode or not.
2145 *
2146 * @returns true if in long mode, otherwise false.
2147 * @param pVCpu Pointer to the VMCPU.
2148 */
2149VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2150{
2151 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2152}
2153
2154
2155/**
2156 * Tests if the guest is running in PAE mode or not.
2157 *
2158 * @returns true if in PAE mode, otherwise false.
2159 * @param pVCpu Pointer to the VMCPU.
2160 */
2161VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2162{
2163 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
2164 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
2165 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2166 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2167 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2168}
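
/* Illustration (hypothetical helper, not part of the original file): the mode
   predicates above can be combined to classify the guest paging mode roughly
   the way the rest of the VMM reasons about it. */
DECLINLINE(const char *) cpumSketchPagingModeName(PVMCPU pVCpu)
{
    if (!CPUMIsGuestPagingEnabled(pVCpu))
        return "none";
    if (CPUMIsGuestInLongMode(pVCpu))
        return "AMD64";             /* long mode always uses PAE-style page tables */
    if (CPUMIsGuestInPAEMode(pVCpu))
        return "PAE";
    return "32-bit";
}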
2169
2170
2171/**
2172 * Tests if the guest is running in 64-bit mode or not.
2173 *
2174 * @returns true if in 64-bit protected mode, otherwise false.
2175 * @param pVCpu The current virtual CPU.
2176 */
2177VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2178{
2179 if (!CPUMIsGuestInLongMode(pVCpu))
2180 return false;
2181 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2182 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2183}
2184
2185
2186/**
2187 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2188 * registers.
2189 *
2190 * @returns true if in 64-bit protected mode, otherwise false.
2191 * @param pCtx Pointer to the current guest CPU context.
2192 */
2193VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2194{
2195 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2196}
2197
2198#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2199
2200/**
2201 * Tests whether the guest state has been transformed for raw-mode execution.
2202 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2203 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2204 * @param pVCpu The current virtual CPU.
2205 */
2206VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2207{
2208 return pVCpu->cpum.s.fRawEntered;
2209}
2210
2211/**
2212 * Transforms the guest CPU state to raw-ring mode.
2213 *
2214 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
2215 *
2216 * @returns VBox status code. (recompiler failure)
2217 * @param pVCpu Pointer to the VMCPU.
2218 * @param pCtxCore The context core (for trap usage).
2219 * @see @ref pg_raw
2220 */
2221VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
2222{
2223 PVM pVM = pVCpu->CTX_SUFF(pVM);
2224
2225 Assert(!pVCpu->cpum.s.fRawEntered);
2226 Assert(!pVCpu->cpum.s.fRemEntered);
2227 if (!pCtxCore)
2228 pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
2229
2230 /*
2231 * Are we in Ring-0?
2232 */
2233 if ( pCtxCore->ss.Sel
2234 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
2235 && !pCtxCore->eflags.Bits.u1VM)
2236 {
2237 /*
2238 * Enter execution mode.
2239 */
2240 PATMRawEnter(pVM, pCtxCore);
2241
2242 /*
2243 * Set CPL to Ring-1.
2244 */
2245 pCtxCore->ss.Sel |= 1;
2246 if ( pCtxCore->cs.Sel
2247 && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
2248 pCtxCore->cs.Sel |= 1;
2249 }
2250 else
2251 {
2252# ifdef VBOX_WITH_RAW_RING1
2253 if ( EMIsRawRing1Enabled(pVM)
2254 && !pCtxCore->eflags.Bits.u1VM
2255 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
2256 {
2257 /* Set CPL to Ring-2. */
2258 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
2259 if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
2260 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
2261 }
2262# else
2263 AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
2264 ("ring-1 code not supported\n"));
2265# endif
2266 /*
2267 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2268 */
2269 PATMRawEnter(pVM, pCtxCore);
2270 }
2271
2272 /*
2273 * Assert sanity.
2274 */
2275 AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2276 AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0,
2277 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
2278 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
2279
2280 pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
2281
2282 pVCpu->cpum.s.fRawEntered = true;
2283 return VINF_SUCCESS;
2284}
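
/* Worked example (hypothetical helper, for illustration only): the RPL fix-up
   CPUMRawEnter performs on a ring-0 selector, shown in isolation. A flat ring-0
   selector such as 0x0008 (RPL=0) becomes 0x0009 (RPL=1), so the guest's ring-0
   code can be executed in ring 1 under raw-mode. */
DECLINLINE(RTSEL) cpumSketchRawModeRpl0To1(RTSEL Sel)
{
    Assert((Sel & X86_SEL_RPL) == 0);   /* caller passes a ring-0 selector */
    return Sel | 1;                     /* guest ring-0 runs at RPL=1 in raw-mode */
}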
2285
2286
2287/**
2288 * Transforms the guest CPU state from raw-ring mode to correct values.
2289 *
2290 * This function will change any selector registers with DPL=1 to DPL=0.
2291 *
2292 * @returns Adjusted rc.
2293 * @param pVCpu Pointer to the VMCPU.
2294 * @param rc Raw mode return code
2295 * @param pCtxCore The context core (for trap usage).
2296 * @see @ref pg_raw
2297 */
2298VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
2299{
2300 PVM pVM = pVCpu->CTX_SUFF(pVM);
2301
2302 /*
2303 * Don't leave if we've already left (in RC).
2304 */
2305 Assert(!pVCpu->cpum.s.fRemEntered);
2306 if (!pVCpu->cpum.s.fRawEntered)
2307 return rc;
2308 pVCpu->cpum.s.fRawEntered = false;
2309
2310 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2311 if (!pCtxCore)
2312 pCtxCore = CPUMCTX2CORE(pCtx);
2313 Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss.Sel & X86_SEL_RPL));
2314 AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL),
2315 ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
2316
2317 /*
2318 * Are we executing in raw ring-1?
2319 */
2320 if ( (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
2321 && !pCtxCore->eflags.Bits.u1VM)
2322 {
2323 /*
2324 * Leave execution mode.
2325 */
2326 PATMRawLeave(pVM, pCtxCore, rc);
2327 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2328 /** @todo See what happens if we remove this. */
2329 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
2330 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
2331 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
2332 pCtxCore->es.Sel &= ~X86_SEL_RPL;
2333 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
2334 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
2335 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
2336 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
2337
2338 /*
2339 * Ring-1 selector => Ring-0.
2340 */
2341 pCtxCore->ss.Sel &= ~X86_SEL_RPL;
2342 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
2343 pCtxCore->cs.Sel &= ~X86_SEL_RPL;
2344 }
2345 else
2346 {
2347 /*
2348 * PATM is taking care of the IOPL and IF flags for us.
2349 */
2350 PATMRawLeave(pVM, pCtxCore, rc);
2351 if (!pCtxCore->eflags.Bits.u1VM)
2352 {
2353# ifdef VBOX_WITH_RAW_RING1
2354 if ( EMIsRawRing1Enabled(pVM)
2355 && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)
2356 {
2357 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2358 /** @todo See what happens if we remove this. */
2359 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 2)
2360 pCtxCore->ds.Sel = (pCtxCore->ds.Sel & ~X86_SEL_RPL) | 1;
2361 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 2)
2362 pCtxCore->es.Sel = (pCtxCore->es.Sel & ~X86_SEL_RPL) | 1;
2363 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 2)
2364 pCtxCore->fs.Sel = (pCtxCore->fs.Sel & ~X86_SEL_RPL) | 1;
2365 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 2)
2366 pCtxCore->gs.Sel = (pCtxCore->gs.Sel & ~X86_SEL_RPL) | 1;
2367
2368 /*
2369 * Ring-2 selector => Ring-1.
2370 */
2371 pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 1;
2372 if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 2)
2373 pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 1;
2374 }
2375 else
2376 {
2377# endif
2378 /** @todo See what happens if we remove this. */
2379 if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
2380 pCtxCore->ds.Sel &= ~X86_SEL_RPL;
2381 if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
2382 pCtxCore->es.Sel &= ~X86_SEL_RPL;
2383 if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
2384 pCtxCore->fs.Sel &= ~X86_SEL_RPL;
2385 if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
2386 pCtxCore->gs.Sel &= ~X86_SEL_RPL;
2387# ifdef VBOX_WITH_RAW_RING1
2388 }
2389# endif
2390 }
2391 }
2392
2393 return rc;
2394}
2395
2396#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2397
2398/**
2399 * Updates the EFLAGS while we're in raw-mode.
2400 *
2401 * @param pVCpu Pointer to the VMCPU.
2402 * @param fEfl The new EFLAGS value.
2403 */
2404VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2405{
2406#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2407 if (pVCpu->cpum.s.fRawEntered)
2408 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest), fEfl);
2409 else
2410#endif
2411 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2412}
2413
2414
2415/**
2416 * Gets the EFLAGS while we're in raw-mode.
2417 *
2418 * @returns The eflags.
2419 * @param pVCpu Pointer to the current virtual CPU.
2420 */
2421VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2422{
2423#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2424 if (pVCpu->cpum.s.fRawEntered)
2425 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest));
2426#endif
2427 return pVCpu->cpum.s.Guest.eflags.u32;
2428}
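
/* Usage sketch (hypothetical helper, not part of the original file): the pair of
   accessors above lets callers read and modify guest EFLAGS without caring
   whether PATM currently owns the IF/IOPL bits. */
DECLINLINE(void) cpumSketchClearGuestTrapFlag(PVMCPU pVCpu)
{
    uint32_t fEfl = CPUMRawGetEFlags(pVCpu);        /* PATM-aware read  */
    CPUMRawSetEFlags(pVCpu, fEfl & ~X86_EFL_TF);    /* PATM-aware write */
}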
2429
2430
2431/**
2432 * Sets the specified changed flags (CPUM_CHANGED_*).
2433 *
2434 * @param pVCpu Pointer to the current virtual CPU.
2435 * @param fChangedFlags The changed flags (CPUM_CHANGED_* defines).
 */
2436VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2437{
2438 pVCpu->cpum.s.fChanged |= fChangedFlags;
2439}
2440
2441
2442/**
2443 * Checks if the CPU supports the FXSAVE and FXRSTOR instructions.
2444 * @returns true if supported.
2445 * @returns false if not supported.
2446 * @param pVM Pointer to the VM.
2447 */
2448VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2449{
2450 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2451}
2452
2453
2454/**
2455 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2456 * @returns true if used.
2457 * @returns false if not used.
2458 * @param pVM Pointer to the VM.
2459 */
2460VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2461{
2462 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2463}
2464
2465
2466/**
2467 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2468 * @returns true if used.
2469 * @returns false if not used.
2470 * @param pVM Pointer to the VM.
2471 */
2472VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2473{
2474 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2475}
2476
2477#ifdef IN_RC
2478
2479/**
2480 * Lazily sync in the FPU/XMM state.
2481 *
2482 * @returns VBox status code.
2483 * @param pVCpu Pointer to the VMCPU.
2484 */
2485VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2486{
2487 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2488}
2489
2490#endif /* IN_RC */
2491
2492/**
2493 * Checks if we activated the FPU/XMM state of the guest OS.
2494 * @returns true if we did.
2495 * @returns false if not.
2496 * @param pVCpu Pointer to the VMCPU.
2497 */
2498VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2499{
2500 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
2501}
2502
2503
2504/**
2505 * Deactivate the FPU/XMM state of the guest OS.
2506 * @param pVCpu Pointer to the VMCPU.
2507 *
2508 * @todo r=bird: Why is this needed? Looks like a workaround for mishandled
2509 * FPU state management.
2510 */
2511VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2512{
2513 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU));
2514 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2515}
2516
2517
2518/**
2519 * Checks if the guest debug state is active.
2520 *
2521 * @returns boolean
2522 * @param pVCpu Pointer to the VMCPU.
2523 */
2524VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2525{
2526 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2527}
2528
2529
2530/**
2531 * Checks if the guest debug state is to be made active during the world-switch
2532 * (currently only used for the 32->64 switcher case).
2533 *
2534 * @returns boolean
2535 * @param pVCpu Pointer to the VMCPU.
2536 */
2537VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2538{
2539 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2540}
2541
2542
2543/**
2544 * Checks if the hyper debug state is active.
2545 *
2546 * @returns boolean
2547 * @param pVCpu Pointer to the VMCPU.
2548 */
2549VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2550{
2551 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2552}
2553
2554
2555/**
2556 * Checks if the hyper debug state is to be made active during the world-switch
2557 * (currently only used for the 32->64 switcher case).
2558 *
2559 * @returns boolean
2560 * @param pVCpu Pointer to the VMCPU.
2561 */
2562VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2563{
2564 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2565}
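
/* Usage sketch (hypothetical, for illustration): a world-switch path could combine
   the pending-state predicates above to decide whether the debug registers still
   need to be loaded. */
DECLINLINE(bool) cpumSketchNeedsDrxLoad(PVMCPU pVCpu)
{
    return CPUMIsGuestDebugStateActivePending(pVCpu)
        || CPUMIsHyperDebugStateActivePending(pVCpu);
}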
2566
2567
2568/**
2569 * Mark the guest's debug state as inactive.
2570 *
2571 * @param pVCpu Pointer to the VMCPU.
2572 *
2573 * @todo This API doesn't make sense any more.
2574 */
2575VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2576{
2577 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2578}
2579
2580
2581/**
2582 * Get the current privilege level of the guest.
2583 *
2584 * @returns CPL
2585 * @param pVCpu Pointer to the current virtual CPU.
2586 */
2587VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2588{
2589 /*
2590 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2591 *
2592 * Note! We used to check CS.DPL here, assuming it was always equal to
2593 * CPL even if a conforming segment was loaded. But this turned out to
2594 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2595 * during install after a far call to ring 2 with VT-x. Then on newer
2596 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2597 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2598 *
2599 * So, forget CS.DPL, always use SS.DPL.
2600 *
2601 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2602 * isn't necessarily equal if the segment is conforming.
2603 * See section 4.11.1 in the AMD manual.
2604 *
2605 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2606 * right after real->prot mode switch and when in V8086 mode? That
2607 * section says the RPL specified in a direct transfer (call, jmp,
2608 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2609 * it would be impossible for an exception handler or the iret
2610 * instruction to figure out whether SS:ESP are part of the frame
2611 * or not. A VBox or qemu bug must've led to this misconception.
2612 *
2613 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2614 * selector into SS with an RPL other than the CPL when CPL != 3 and
2615 * we're in 64-bit mode. The intel dev box doesn't allow this; only
2616 * RPL = CPL works there. Weird.
2617 */
2618 uint32_t uCpl;
2619 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2620 {
2621 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2622 {
2623 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2624 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2625 else
2626 {
2627 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2628#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2629# ifdef VBOX_WITH_RAW_RING1
2630 if (pVCpu->cpum.s.fRawEntered)
2631 {
2632 if ( uCpl == 2
2633 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2634 uCpl = 1;
2635 else if (uCpl == 1)
2636 uCpl = 0;
2637 }
2638 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2639# else
2640 if (uCpl == 1)
2641 uCpl = 0;
2642# endif
2643#endif
2644 }
2645 }
2646 else
2647 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2648 }
2649 else
2650 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2651 return uCpl;
2652}
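
/* Usage sketch (hypothetical helper, not part of the original file): supervisor
   mode checks elsewhere are typically phrased against the value returned above. */
DECLINLINE(bool) cpumSketchIsGuestInSupervisorMode(PVMCPU pVCpu)
{
    return CPUMGetGuestCPL(pVCpu) == 0;     /* CPL 0 = kernel / supervisor mode */
}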
2653
2654
2655/**
2656 * Gets the current guest CPU mode.
2657 *
2658 * If paging mode is what you need, check out PGMGetGuestMode().
2659 *
2660 * @returns The CPU mode.
2661 * @param pVCpu Pointer to the VMCPU.
2662 */
2663VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2664{
2665 CPUMMODE enmMode;
2666 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2667 enmMode = CPUMMODE_REAL;
2668 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2669 enmMode = CPUMMODE_PROTECTED;
2670 else
2671 enmMode = CPUMMODE_LONG;
2672
2673 return enmMode;
2674}
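
/* Illustration (hypothetical helper): mapping the CPUMMODE value returned above
   to a short name, e.g. for logging. */
DECLINLINE(const char *) cpumSketchModeName(CPUMMODE enmMode)
{
    switch (enmMode)
    {
        case CPUMMODE_REAL:      return "REAL";
        case CPUMMODE_PROTECTED: return "PROT";
        case CPUMMODE_LONG:      return "LONG";
        default:                 return "invalid";
    }
}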
2675
2676
2677/**
2678 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
2679 *
2680 * @returns 16, 32 or 64.
2681 * @param pVCpu The current virtual CPU.
2682 */
2683VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2684{
2685 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2686 return 16;
2687
2688 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2689 {
2690 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2691 return 16;
2692 }
2693
2694 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2695 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2696 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2697 return 64;
2698
2699 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2700 return 32;
2701
2702 return 16;
2703}
2704
2705
2706VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2707{
2708 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2709 return DISCPUMODE_16BIT;
2710
2711 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2712 {
2713 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2714 return DISCPUMODE_16BIT;
2715 }
2716
2717 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2718 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2719 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2720 return DISCPUMODE_64BIT;
2721
2722 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2723 return DISCPUMODE_32BIT;
2724
2725 return DISCPUMODE_16BIT;
2726}
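
/* Note (illustrative): CPUMGetGuestCodeBits and CPUMGetGuestDisMode walk the same
   CR0.PE / EFLAGS.VM / CS.L / CS.D decision tree; a hypothetical mapping between
   their results would look like this. */
DECLINLINE(DISCPUMODE) cpumSketchBitsToDisMode(uint32_t cBits)
{
    return cBits == 64 ? DISCPUMODE_64BIT
         : cBits == 32 ? DISCPUMODE_32BIT
         :               DISCPUMODE_16BIT;
}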
2727