VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@55302

Last change on this file since 55302 was 55292, checked in by vboxsync, 10 years ago

HM,CPUM,IEM: XSETBV fixes and adjustments.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 83.8 KB
1/* $Id: CPUMAllRegs.cpp 55292 2015-04-15 15:31:51Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
53AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
54
55
56/*******************************************************************************
57* Defined Constants And Macros *
58*******************************************************************************/
59/**
60 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
61 *
62 * @returns Pointer to the Virtual CPU.
63 * @param a_pGuestCtx Pointer to the guest context.
64 */
65#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
66
67/**
68 * Lazily loads the hidden parts of a selector register when using raw-mode.
69 */
70#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
71# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
72 do \
73 { \
74 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
75 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
76 } while (0)
77#else
78# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
79 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
80#endif
81
82
83
84#ifdef VBOX_WITH_RAW_MODE_NOT_R0
85
86/**
87 * Does the lazy hidden selector register loading.
88 *
89 * @param pVCpu The current Virtual CPU.
90 * @param pSReg The selector register to lazily load hidden parts of.
91 */
92static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
93{
94 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
95 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
96 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
97
98 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
99 {
100 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
101 pSReg->Attr.u = 0;
102 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
103 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
104 pSReg->Attr.n.u2Dpl = 3;
105 pSReg->Attr.n.u1Present = 1;
106 pSReg->u32Limit = 0x0000ffff;
107 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
108 pSReg->ValidSel = pSReg->Sel;
109 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
110 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
111 }
112 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
113 {
114 /* Real mode - leave the limit and flags alone here, at least for now. */
115 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
116 pSReg->ValidSel = pSReg->Sel;
117 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
118 }
119 else
120 {
121 /* Protected mode - get it from the selector descriptor tables. */
122 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
123 {
124 Assert(!CPUMIsGuestInLongMode(pVCpu));
125 pSReg->Sel = 0;
126 pSReg->u64Base = 0;
127 pSReg->u32Limit = 0;
128 pSReg->Attr.u = 0;
129 pSReg->ValidSel = 0;
130 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
131 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
132 }
133 else
134 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
135 }
136}
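/* Illustrative note (not part of the original file): in real and V8086 mode the
 * hidden base is simply the selector value shifted left by four, e.g.:
 * @code
 *     uint16_t Sel     = 0xb800;                // text-mode video segment
 *     uint64_t u64Base = (uint32_t)Sel << 4;    // 0x000b8000
 * @endcode
 * The 0x0000ffff limit used for V8086 mode above matches the fixed 64 KB segment size. */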
137
138
139/**
140 * Makes sure the hidden CS and SS selector registers are valid, loading them if
141 * necessary.
142 *
143 * @param pVCpu The current virtual CPU.
144 */
145VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
146{
147 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
148 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
149}
150
151
152/**
153 * Loads the hidden parts of a selector register.
154 *
155 * @param pVCpu The current virtual CPU.
156 */
157VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
158{
159 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
160}
161
162#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
163
164
165/**
166 * Obsolete.
167 *
168 * We don't support nested hypervisor context interrupts or traps. Life is much
169 * simpler when we don't. It's also slightly faster at times.
170 *
171 * @param pVCpu Pointer to the VMCPU.
172 */
173VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
174{
175 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
176}
177
178
179/**
180 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
181 *
182 * @param pVCpu Pointer to the VMCPU.
183 */
184VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
185{
186 return &pVCpu->cpum.s.Hyper;
187}
188
189
190VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
191{
192 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
193 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
194}
195
196
197VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
198{
199 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
200 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
201}
202
203
204VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
205{
206 pVCpu->cpum.s.Hyper.cr3 = cr3;
207
208#ifdef IN_RC
209 /* Update the current CR3. */
210 ASMSetCR3(cr3);
211#endif
212}
213
214VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
215{
216 return pVCpu->cpum.s.Hyper.cr3;
217}
218
219
220VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
221{
222 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
223}
224
225
226VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
227{
228 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
229}
230
231
232VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
233{
234 pVCpu->cpum.s.Hyper.es.Sel = SelES;
235}
236
237
238VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
239{
240 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
241}
242
243
244VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
245{
246 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
247}
248
249
250VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
251{
252 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
253}
254
255
256VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
257{
258 pVCpu->cpum.s.Hyper.esp = u32ESP;
259}
260
261
262VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
263{
264 pVCpu->cpum.s.Hyper.edx = u32EDX;
265}
266
267
268VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
269{
270 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
271 return VINF_SUCCESS;
272}
273
274
275VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
276{
277 pVCpu->cpum.s.Hyper.eip = u32EIP;
278}
279
280
281/**
282 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
283 * EFLAGS and EIP prior to resuming guest execution.
284 *
285 * All general registers not given as a parameter will be set to 0. The EFLAGS
286 * register will be set to sane values for C/C++ code execution with interrupts
287 * disabled and IOPL 0.
288 *
289 * @param pVCpu The current virtual CPU.
290 * @param u32EIP The EIP value.
291 * @param u32ESP The ESP value.
292 * @param u32EAX The EAX value.
293 * @param u32EDX The EDX value.
294 */
295VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
296{
297 pVCpu->cpum.s.Hyper.eip = u32EIP;
298 pVCpu->cpum.s.Hyper.esp = u32ESP;
299 pVCpu->cpum.s.Hyper.eax = u32EAX;
300 pVCpu->cpum.s.Hyper.edx = u32EDX;
301 pVCpu->cpum.s.Hyper.ecx = 0;
302 pVCpu->cpum.s.Hyper.ebx = 0;
303 pVCpu->cpum.s.Hyper.ebp = 0;
304 pVCpu->cpum.s.Hyper.esi = 0;
305 pVCpu->cpum.s.Hyper.edi = 0;
306 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
307}
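/* Hypothetical usage sketch (the names below are illustrative assumptions, not from the
 * original sources): a raw-mode resume path could prime the context roughly like this:
 * @code
 *     uint32_t GCPtrEntry = GCPtrSwitcherEntry;   // assumed raw-mode entry point
 *     uint32_t GCPtrStack = GCPtrHyperStackTop;   // assumed hypervisor stack top
 *     CPUMSetHyperState(pVCpu, GCPtrEntry, GCPtrStack, uOperation, uArg);
 * @endcode
 * Everything not passed in (EBX, ECX, EBP, ESI, EDI) is zeroed, and EFLAGS is set to
 * just the reserved-1 bit, i.e. interrupts disabled and IOPL 0. */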
308
309
310VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
311{
312 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
313}
314
315
316VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
317{
318 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
319}
320
321
322/** @def MAYBE_LOAD_DRx
323 * Macro for updating DRx values in raw-mode and ring-0 contexts.
324 */
325#ifdef IN_RING0
326# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
327# ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
328# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
329 do { \
330 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
331 a_fnLoad(a_uValue); \
332 else \
333 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
334 } while (0)
335# else
336# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
337 do { \
338 /** @todo we're not loading the correct guest value here! */ \
339 a_fnLoad(a_uValue); \
340 } while (0)
341# endif
342# else
343# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
344 do { \
345 a_fnLoad(a_uValue); \
346 } while (0)
347# endif
348
349#elif defined(IN_RC)
350# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
351 do { \
352 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
353 { a_fnLoad(a_uValue); } \
354 } while (0)
355
356#else
357# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
358#endif
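/* Expansion sketch (illustrative): in ring-0 on a 64-bit host the macro reduces to an
 * unconditional load, so CPUMSetHyperDR0 below effectively becomes:
 * @code
 *     pVCpu->cpum.s.Hyper.dr[0] = uDr0;
 *     ASMSetDR0(uDr0);
 * @endcode
 * In raw-mode context (IN_RC) the load only happens while CPUM_USED_DEBUG_REGS_HYPER is
 * set, and in ring-3 the macro is a no-op, leaving just the structure update. */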
359
360VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
361{
362 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
363 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
364}
365
366
367VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
368{
369 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
370 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
371}
372
373
374VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
375{
376 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
377 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
378}
379
380
381VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
382{
383 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
384 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
385}
386
387
388VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
389{
390 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
391}
392
393
394VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
395{
396 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
397#ifdef IN_RC
398 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
399#endif
400}
401
402
403VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
404{
405 return pVCpu->cpum.s.Hyper.cs.Sel;
406}
407
408
409VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
410{
411 return pVCpu->cpum.s.Hyper.ds.Sel;
412}
413
414
415VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
416{
417 return pVCpu->cpum.s.Hyper.es.Sel;
418}
419
420
421VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
422{
423 return pVCpu->cpum.s.Hyper.fs.Sel;
424}
425
426
427VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
428{
429 return pVCpu->cpum.s.Hyper.gs.Sel;
430}
431
432
433VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
434{
435 return pVCpu->cpum.s.Hyper.ss.Sel;
436}
437
438
439VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
440{
441 return pVCpu->cpum.s.Hyper.eax;
442}
443
444
445VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
446{
447 return pVCpu->cpum.s.Hyper.ebx;
448}
449
450
451VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
452{
453 return pVCpu->cpum.s.Hyper.ecx;
454}
455
456
457VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
458{
459 return pVCpu->cpum.s.Hyper.edx;
460}
461
462
463VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
464{
465 return pVCpu->cpum.s.Hyper.esi;
466}
467
468
469VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
470{
471 return pVCpu->cpum.s.Hyper.edi;
472}
473
474
475VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
476{
477 return pVCpu->cpum.s.Hyper.ebp;
478}
479
480
481VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
482{
483 return pVCpu->cpum.s.Hyper.esp;
484}
485
486
487VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
488{
489 return pVCpu->cpum.s.Hyper.eflags.u32;
490}
491
492
493VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
494{
495 return pVCpu->cpum.s.Hyper.eip;
496}
497
498
499VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
500{
501 return pVCpu->cpum.s.Hyper.rip;
502}
503
504
505VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
506{
507 if (pcbLimit)
508 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
509 return pVCpu->cpum.s.Hyper.idtr.pIdt;
510}
511
512
513VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
514{
515 if (pcbLimit)
516 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
517 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
518}
519
520
521VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
522{
523 return pVCpu->cpum.s.Hyper.ldtr.Sel;
524}
525
526
527VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
528{
529 return pVCpu->cpum.s.Hyper.dr[0];
530}
531
532
533VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
534{
535 return pVCpu->cpum.s.Hyper.dr[1];
536}
537
538
539VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
540{
541 return pVCpu->cpum.s.Hyper.dr[2];
542}
543
544
545VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
546{
547 return pVCpu->cpum.s.Hyper.dr[3];
548}
549
550
551VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
552{
553 return pVCpu->cpum.s.Hyper.dr[6];
554}
555
556
557VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
558{
559 return pVCpu->cpum.s.Hyper.dr[7];
560}
561
562
563/**
564 * Gets the pointer to the internal CPUMCTXCORE structure.
565 * This is only for reading in order to save a few calls.
566 *
567 * @param pVCpu Handle to the virtual cpu.
568 */
569VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
570{
571 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
572}
573
574
575/**
576 * Queries the pointer to the internal CPUMCTX structure.
577 *
578 * @returns The CPUMCTX pointer.
579 * @param pVCpu Handle to the virtual cpu.
580 */
581VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
582{
583 return &pVCpu->cpum.s.Guest;
584}
585
586VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
587{
588#ifdef VBOX_WITH_IEM
589# ifdef VBOX_WITH_RAW_MODE_NOT_R0
590 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
591 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
592# endif
593#endif
594 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
595 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
596 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
597 return VINF_SUCCESS; /* formality, consider it void. */
598}
599
600VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
601{
602#ifdef VBOX_WITH_IEM
603# ifdef VBOX_WITH_RAW_MODE_NOT_R0
604 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
605 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
606# endif
607#endif
608 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
609 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
610 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
611 return VINF_SUCCESS; /* formality, consider it void. */
612}
613
614VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
615{
616#ifdef VBOX_WITH_IEM
617# ifdef VBOX_WITH_RAW_MODE_NOT_R0
618 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
619 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
620# endif
621#endif
622 pVCpu->cpum.s.Guest.tr.Sel = tr;
623 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
624 return VINF_SUCCESS; /* formality, consider it void. */
625}
626
627VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
628{
629#ifdef VBOX_WITH_IEM
630# ifdef VBOX_WITH_RAW_MODE_NOT_R0
631 if ( ( ldtr != 0
632 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
633 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
634 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
635# endif
636#endif
637 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
638 /* The caller will set more hidden bits if it has them. */
639 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
640 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
641 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
642 return VINF_SUCCESS; /* formality, consider it void. */
643}
644
645
646/**
647 * Set the guest CR0.
648 *
649 * When called in GC, the hyper CR0 may be updated if that is
650 * required. The caller only has to take special action if AM,
651 * WP, PG or PE changes.
652 *
653 * @returns VINF_SUCCESS (consider it void).
654 * @param pVCpu Handle to the virtual cpu.
655 * @param cr0 The new CR0 value.
656 */
657VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
658{
659#ifdef IN_RC
660 /*
661 * Check if we need to change hypervisor CR0 because
662 * of math stuff.
663 */
664 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
665 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
666 {
667 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
668 {
669 /*
670 * We haven't saved the host FPU state yet, so TS and MP are both set
671 * and EM should be reflecting the guest EM (it always does this).
672 */
673 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
674 {
675 uint32_t HyperCR0 = ASMGetCR0();
676 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
677 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
678 HyperCR0 &= ~X86_CR0_EM;
679 HyperCR0 |= cr0 & X86_CR0_EM;
680 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
681 ASMSetCR0(HyperCR0);
682 }
683# ifdef VBOX_STRICT
684 else
685 {
686 uint32_t HyperCR0 = ASMGetCR0();
687 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
688 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
689 }
690# endif
691 }
692 else
693 {
694 /*
695 * Already saved the state, so we're just mirroring
696 * the guest flags.
697 */
698 uint32_t HyperCR0 = ASMGetCR0();
699 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
700 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
701 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
702 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
703 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
704 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
705 ASMSetCR0(HyperCR0);
706 }
707 }
708#endif /* IN_RC */
709
710 /*
711 * Check for changes causing TLB flushes (for REM).
712 * The caller is responsible for calling PGM when appropriate.
713 */
714 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
715 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
716 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
717 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
718
719 /*
720 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
721 */
722 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
723 PGMCr0WpEnabled(pVCpu);
724
725 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
726 return VINF_SUCCESS;
727}
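/* Worked example (illustrative, not part of the original file): once the host FPU state
 * has been saved (CPUM_USED_FPU), the raw-mode path above simply mirrors the guest's
 * CR0.TS/EM/MP bits into the hypervisor CR0:
 * @code
 *     uint32_t const fFpuBits = X86_CR0_TS | X86_CR0_EM | X86_CR0_MP;
 *     uint32_t       HyperCR0 = ASMGetCR0();
 *     HyperCR0 = (HyperCR0 & ~fFpuBits) | ((uint32_t)cr0 & fFpuBits);
 *     ASMSetCR0(HyperCR0);
 * @endcode
 * Also note that X86_CR0_ET is always forced to 1 in the value stored for the guest. */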
728
729
730VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
731{
732 pVCpu->cpum.s.Guest.cr2 = cr2;
733 return VINF_SUCCESS;
734}
735
736
737VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
738{
739 pVCpu->cpum.s.Guest.cr3 = cr3;
740 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
741 return VINF_SUCCESS;
742}
743
744
745VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
746{
747 /*
748 * The CR4.OSXSAVE bit is reflected in CPUID(1).ECX[27].
749 */
750 if ( (cr4 & X86_CR4_OSXSAVE)
751 != (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE) )
752 {
753 PVM pVM = pVCpu->CTX_SUFF(pVM);
754 if (cr4 & X86_CR4_OSXSAVE)
755 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
756 else
757 CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
758 }
759
760 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
761 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
762 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
763
764 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
765 pVCpu->cpum.s.Guest.cr4 = cr4;
766 return VINF_SUCCESS;
767}
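/* Illustrative note: because of the OSXSAVE mirroring above, toggling CR4.OSXSAVE is
 * what makes CPUID(1).ECX[27] appear or disappear for the guest. A minimal sketch of
 * the comparison, using the same definitions as the function above:
 * @code
 *     bool const fOld = RT_BOOL(pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE);
 *     bool const fNew = RT_BOOL(cr4 & X86_CR4_OSXSAVE);
 *     if (fOld != fNew)
 *     {
 *         // set or clear CPUMCPUIDFEATURE_OSXSAVE accordingly
 *     }
 * @endcode
 */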
768
769
770VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
771{
772 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
773 return VINF_SUCCESS;
774}
775
776
777VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
778{
779 pVCpu->cpum.s.Guest.eip = eip;
780 return VINF_SUCCESS;
781}
782
783
784VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
785{
786 pVCpu->cpum.s.Guest.eax = eax;
787 return VINF_SUCCESS;
788}
789
790
791VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
792{
793 pVCpu->cpum.s.Guest.ebx = ebx;
794 return VINF_SUCCESS;
795}
796
797
798VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
799{
800 pVCpu->cpum.s.Guest.ecx = ecx;
801 return VINF_SUCCESS;
802}
803
804
805VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
806{
807 pVCpu->cpum.s.Guest.edx = edx;
808 return VINF_SUCCESS;
809}
810
811
812VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
813{
814 pVCpu->cpum.s.Guest.esp = esp;
815 return VINF_SUCCESS;
816}
817
818
819VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
820{
821 pVCpu->cpum.s.Guest.ebp = ebp;
822 return VINF_SUCCESS;
823}
824
825
826VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
827{
828 pVCpu->cpum.s.Guest.esi = esi;
829 return VINF_SUCCESS;
830}
831
832
833VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
834{
835 pVCpu->cpum.s.Guest.edi = edi;
836 return VINF_SUCCESS;
837}
838
839
840VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
841{
842 pVCpu->cpum.s.Guest.ss.Sel = ss;
843 return VINF_SUCCESS;
844}
845
846
847VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
848{
849 pVCpu->cpum.s.Guest.cs.Sel = cs;
850 return VINF_SUCCESS;
851}
852
853
854VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
855{
856 pVCpu->cpum.s.Guest.ds.Sel = ds;
857 return VINF_SUCCESS;
858}
859
860
861VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
862{
863 pVCpu->cpum.s.Guest.es.Sel = es;
864 return VINF_SUCCESS;
865}
866
867
868VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
869{
870 pVCpu->cpum.s.Guest.fs.Sel = fs;
871 return VINF_SUCCESS;
872}
873
874
875VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
876{
877 pVCpu->cpum.s.Guest.gs.Sel = gs;
878 return VINF_SUCCESS;
879}
880
881
882VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
883{
884 pVCpu->cpum.s.Guest.msrEFER = val;
885}
886
887
888VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
889{
890 if (pcbLimit)
891 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
892 return pVCpu->cpum.s.Guest.idtr.pIdt;
893}
894
895
896VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
897{
898 if (pHidden)
899 *pHidden = pVCpu->cpum.s.Guest.tr;
900 return pVCpu->cpum.s.Guest.tr.Sel;
901}
902
903
904VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
905{
906 return pVCpu->cpum.s.Guest.cs.Sel;
907}
908
909
910VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
911{
912 return pVCpu->cpum.s.Guest.ds.Sel;
913}
914
915
916VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
917{
918 return pVCpu->cpum.s.Guest.es.Sel;
919}
920
921
922VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
923{
924 return pVCpu->cpum.s.Guest.fs.Sel;
925}
926
927
928VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
929{
930 return pVCpu->cpum.s.Guest.gs.Sel;
931}
932
933
934VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
935{
936 return pVCpu->cpum.s.Guest.ss.Sel;
937}
938
939
940VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
941{
942 return pVCpu->cpum.s.Guest.ldtr.Sel;
943}
944
945
946VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
947{
948 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
949 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
950 return pVCpu->cpum.s.Guest.ldtr.Sel;
951}
952
953
954VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
955{
956 return pVCpu->cpum.s.Guest.cr0;
957}
958
959
960VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
961{
962 return pVCpu->cpum.s.Guest.cr2;
963}
964
965
966VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
967{
968 return pVCpu->cpum.s.Guest.cr3;
969}
970
971
972VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
973{
974 return pVCpu->cpum.s.Guest.cr4;
975}
976
977
978VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
979{
980 uint64_t u64;
981 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
982 if (RT_FAILURE(rc))
983 u64 = 0;
984 return u64;
985}
986
987
988VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
989{
990 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
991}
992
993
994VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
995{
996 return pVCpu->cpum.s.Guest.eip;
997}
998
999
1000VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1001{
1002 return pVCpu->cpum.s.Guest.rip;
1003}
1004
1005
1006VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1007{
1008 return pVCpu->cpum.s.Guest.eax;
1009}
1010
1011
1012VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1013{
1014 return pVCpu->cpum.s.Guest.ebx;
1015}
1016
1017
1018VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1019{
1020 return pVCpu->cpum.s.Guest.ecx;
1021}
1022
1023
1024VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1025{
1026 return pVCpu->cpum.s.Guest.edx;
1027}
1028
1029
1030VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1031{
1032 return pVCpu->cpum.s.Guest.esi;
1033}
1034
1035
1036VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1037{
1038 return pVCpu->cpum.s.Guest.edi;
1039}
1040
1041
1042VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1043{
1044 return pVCpu->cpum.s.Guest.esp;
1045}
1046
1047
1048VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1049{
1050 return pVCpu->cpum.s.Guest.ebp;
1051}
1052
1053
1054VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1055{
1056 return pVCpu->cpum.s.Guest.eflags.u32;
1057}
1058
1059
1060VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1061{
1062 switch (iReg)
1063 {
1064 case DISCREG_CR0:
1065 *pValue = pVCpu->cpum.s.Guest.cr0;
1066 break;
1067
1068 case DISCREG_CR2:
1069 *pValue = pVCpu->cpum.s.Guest.cr2;
1070 break;
1071
1072 case DISCREG_CR3:
1073 *pValue = pVCpu->cpum.s.Guest.cr3;
1074 break;
1075
1076 case DISCREG_CR4:
1077 *pValue = pVCpu->cpum.s.Guest.cr4;
1078 break;
1079
1080 case DISCREG_CR8:
1081 {
1082 uint8_t u8Tpr;
1083 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1084 if (RT_FAILURE(rc))
1085 {
1086 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1087 *pValue = 0;
1088 return rc;
1089 }
1090 *pValue = u8Tpr >> 4; /* Bits 7-4 of the TPR go into CR8 bits 3-0; TPR bits 3-0 (the sub-class) are not visible in CR8. */
1091 break;
1092 }
1093
1094 default:
1095 return VERR_INVALID_PARAMETER;
1096 }
1097 return VINF_SUCCESS;
1098}
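/* Worked example (illustrative): an APIC TPR of 0xB0 (priority class 0xB, sub-class 0x0)
 * reads back as CR8 = 0xB through the DISCREG_CR8 path above:
 * @code
 *     uint8_t  u8Tpr = 0xB0;
 *     uint64_t uCr8  = u8Tpr >> 4;    // == 0xB; TPR bits 3:0 are not visible in CR8
 * @endcode
 */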
1099
1100
1101VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1102{
1103 return pVCpu->cpum.s.Guest.dr[0];
1104}
1105
1106
1107VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1108{
1109 return pVCpu->cpum.s.Guest.dr[1];
1110}
1111
1112
1113VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1114{
1115 return pVCpu->cpum.s.Guest.dr[2];
1116}
1117
1118
1119VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1120{
1121 return pVCpu->cpum.s.Guest.dr[3];
1122}
1123
1124
1125VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1126{
1127 return pVCpu->cpum.s.Guest.dr[6];
1128}
1129
1130
1131VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1132{
1133 return pVCpu->cpum.s.Guest.dr[7];
1134}
1135
1136
1137VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1138{
1139 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1140 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1141 if (iReg == 4 || iReg == 5)
1142 iReg += 2;
1143 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1144 return VINF_SUCCESS;
1145}
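/* Illustrative note: thanks to the DR4/DR6 and DR5/DR7 aliasing above, these two calls
 * are expected to yield the same value:
 * @code
 *     uint64_t uDr4, uDr6;
 *     CPUMGetGuestDRx(pVCpu, 4, &uDr4);   // aliased to DR6
 *     CPUMGetGuestDRx(pVCpu, 6, &uDr6);
 *     Assert(uDr4 == uDr6);
 * @endcode
 */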
1146
1147
1148VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1149{
1150 return pVCpu->cpum.s.Guest.msrEFER;
1151}
1152
1153
1154/**
1155 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1156 *
1157 * @returns Pointer to the leaf if found, NULL if not.
1158 *
1159 * @param pVM Pointer to the cross context VM structure.
1160 * @param uLeaf The leaf to get.
1161 */
1162PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1163{
1164 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1165 if (iEnd)
1166 {
1167 unsigned iStart = 0;
1168 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1169 for (;;)
1170 {
1171 unsigned i = iStart + (iEnd - iStart) / 2U;
1172 if (uLeaf < paLeaves[i].uLeaf)
1173 {
1174 if (i <= iStart)
1175 return NULL;
1176 iEnd = i;
1177 }
1178 else if (uLeaf > paLeaves[i].uLeaf)
1179 {
1180 i += 1;
1181 if (i >= iEnd)
1182 return NULL;
1183 iStart = i;
1184 }
1185 else
1186 {
1187 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1188 return &paLeaves[i];
1189
1190 /* This shouldn't normally happen. But in case it does, due
1191 to user configuration overrides or something, just return the
1192 first sub-leaf. */
1193 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1194 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1195 while ( paLeaves[i].uSubLeaf != 0
1196 && i > 0
1197 && uLeaf == paLeaves[i - 1].uLeaf)
1198 i--;
1199 return &paLeaves[i];
1200 }
1201 }
1202 }
1203
1204 return NULL;
1205}
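/* A minimal standalone sketch of the same binary search, assuming the leaf array is
 * sorted ascending by uLeaf (which the lookup above relies on); kept here purely for
 * illustration:
 * @code
 *     static int32_t exampleFindLeafIndex(PCCPUMCPUIDLEAF paLeaves, unsigned cLeaves, uint32_t uLeaf)
 *     {
 *         unsigned iLo = 0;
 *         unsigned iHi = cLeaves;
 *         while (iLo < iHi)
 *         {
 *             unsigned i = iLo + (iHi - iLo) / 2;
 *             if      (uLeaf < paLeaves[i].uLeaf) iHi = i;
 *             else if (uLeaf > paLeaves[i].uLeaf) iLo = i + 1;
 *             else                                return (int32_t)i;
 *         }
 *         return -1;
 *     }
 * @endcode
 * The real function above additionally insists on sub-leaf 0 / an empty sub-leaf mask and
 * degrades gracefully when that doesn't hold. */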
1206
1207
1208/**
1209 * Looks up a CPUID leaf in the CPUID leaf array.
1210 *
1211 * @returns Pointer to the leaf if found, NULL if not.
1212 *
1213 * @param pVM Pointer to the cross context VM structure.
1214 * @param uLeaf The leaf to get.
1215 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1216 * isn't.
1217 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1218 */
1219PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1220{
1221 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1222 if (iEnd)
1223 {
1224 unsigned iStart = 0;
1225 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1226 for (;;)
1227 {
1228 unsigned i = iStart + (iEnd - iStart) / 2U;
1229 if (uLeaf < paLeaves[i].uLeaf)
1230 {
1231 if (i <= iStart)
1232 return NULL;
1233 iEnd = i;
1234 }
1235 else if (uLeaf > paLeaves[i].uLeaf)
1236 {
1237 i += 1;
1238 if (i >= iEnd)
1239 return NULL;
1240 iStart = i;
1241 }
1242 else
1243 {
1244 uSubLeaf &= paLeaves[i].fSubLeafMask;
1245 if (uSubLeaf == paLeaves[i].uSubLeaf)
1246 *pfExactSubLeafHit = true;
1247 else
1248 {
1249 /* Find the right subleaf. We return the last one before
1250 uSubLeaf if we don't find an exact match. */
1251 if (uSubLeaf < paLeaves[i].uSubLeaf)
1252 while ( i > 0
1253 && uLeaf == paLeaves[i - 1].uLeaf
1254 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1255 i--;
1256 else
1257 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1258 && uLeaf == paLeaves[i + 1].uLeaf
1259 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1260 i++;
1261 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1262 }
1263 return &paLeaves[i];
1264 }
1265 }
1266 }
1267
1268 *pfExactSubLeafHit = false;
1269 return NULL;
1270}
1271
1272
1273/**
1274 * Gets a CPUID leaf.
1275 *
1276 * @param pVCpu Pointer to the VMCPU.
1277 * @param uLeaf The CPUID leaf to get.
1278 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1279 * @param pEax Where to store the EAX value.
1280 * @param pEbx Where to store the EBX value.
1281 * @param pEcx Where to store the ECX value.
1282 * @param pEdx Where to store the EDX value.
1283 */
1284VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1285 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1286{
1287 bool fExactSubLeafHit;
1288 PVM pVM = pVCpu->CTX_SUFF(pVM);
1289 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1290 if (pLeaf)
1291 {
1292 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x vs %#x\n", pLeaf->uLeaf, uLeaf));
1293 if (fExactSubLeafHit)
1294 {
1295 *pEax = pLeaf->uEax;
1296 *pEbx = pLeaf->uEbx;
1297 *pEcx = pLeaf->uEcx;
1298 *pEdx = pLeaf->uEdx;
1299
1300 /*
1301 * Deal with CPU specific information (currently only APIC ID).
1302 */
1303 if (pLeaf->fFlags & (CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE))
1304 {
1305 if (uLeaf == 1)
1306 {
1307 /* EBX: Bits 31-24: Initial APIC ID. */
1308 Assert(pVCpu->idCpu <= 255);
1309 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1310 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1311
1312 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1313 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1314 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1315 }
1316 else if (uLeaf == 0xb)
1317 {
1318 /* EDX: Initial extended APIC ID. */
1319 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1320 *pEdx = pVCpu->idCpu;
1321 }
1322 else if (uLeaf == UINT32_C(0x8000001e))
1323 {
1324 /* EAX: Initial extended APIC ID. */
1325 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1326 *pEax = pVCpu->idCpu;
1327 }
1328 else
1329 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1330 }
1331 }
1332 /*
1333 * Out-of-range sub-leaves aren't as easy or pretty to emulate as the
1334 * in-range ones, but we do the best we can here...
1335 */
1336 else
1337 {
1338 *pEax = *pEbx = *pEcx = *pEdx = 0;
1339 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1340 {
1341 *pEcx = uSubLeaf & 0xff;
1342 *pEdx = pVCpu->idCpu;
1343 }
1344 }
1345 }
1346 else
1347 {
1348 /*
1349 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1350 */
1351 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1352 {
1353 default:
1354 AssertFailed();
1355 case CPUMUNKNOWNCPUID_DEFAULTS:
1356 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1357 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1358 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1359 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1360 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1361 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1362 break;
1363 case CPUMUNKNOWNCPUID_PASSTHRU:
1364 *pEax = uLeaf;
1365 *pEbx = 0;
1366 *pEcx = uSubLeaf;
1367 *pEdx = 0;
1368 break;
1369 }
1370 }
1371 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1372}
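/* Worked example (illustrative): for VCPU #2 querying leaf 1, the EBX patching above
 * yields an initial APIC ID of 2 in bits 31:24 while keeping the lower 24 bits intact:
 * @code
 *     uint32_t const uEbxStored = UINT32_C(0x00100800);   // hypothetical stored leaf value
 *     uint32_t const idCpu      = 2;
 *     uint32_t const uEbx       = (uEbxStored & UINT32_C(0x00ffffff)) | (idCpu << 24);
 *     // uEbx == 0x02100800
 * @endcode
 */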
1373
1374
1375/**
1376 * Sets a CPUID feature bit.
1377 *
1378 * @param pVM Pointer to the VM.
1379 * @param enmFeature The feature to set.
1380 */
1381VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1382{
1383 PCPUMCPUIDLEAF pLeaf;
1384
1385 switch (enmFeature)
1386 {
1387 /*
1388 * Set the APIC bit in both feature masks.
1389 */
1390 case CPUMCPUIDFEATURE_APIC:
1391 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1392 if (pLeaf)
1393 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
1394
1395 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1396 if ( pLeaf
1397 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1398 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1399
1400 pVM->cpum.s.GuestFeatures.fApic = 1;
1401 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n"));
1402 break;
1403
1404 /*
1405 * Set the x2APIC bit in the standard feature mask.
1406 */
1407 case CPUMCPUIDFEATURE_X2APIC:
1408 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1409 if (pLeaf)
1410 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
1411 pVM->cpum.s.GuestFeatures.fX2Apic = 1;
1412 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
1413 break;
1414
1415 /*
1416 * Set the sysenter/sysexit bit in the standard feature mask.
1417 * Assumes the caller knows what it's doing! (host must support these)
1418 */
1419 case CPUMCPUIDFEATURE_SEP:
1420 if (!pVM->cpum.s.HostFeatures.fSysEnter)
1421 {
1422 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1423 return;
1424 }
1425
1426 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1427 if (pLeaf)
1428 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
1429 pVM->cpum.s.GuestFeatures.fSysEnter = 1;
1430 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
1431 break;
1432
1433 /*
1434 * Set the syscall/sysret bit in the extended feature mask.
1435 * Assumes the caller knows what it's doing! (host must support these)
1436 */
1437 case CPUMCPUIDFEATURE_SYSCALL:
1438 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1439 if ( !pLeaf
1440 || !pVM->cpum.s.HostFeatures.fSysCall)
1441 {
1442#if HC_ARCH_BITS == 32
1443 /* Intel seems not to set X86_CPUID_EXT_FEATURE_EDX_SYSCALL in 32-bit
1444 mode, even when the CPU is capable of doing so in
1445 64-bit mode. Long mode requires syscall support. */
1446 if (!pVM->cpum.s.HostFeatures.fLongMode)
1447#endif
1448 {
1449 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
1450 return;
1451 }
1452 }
1453
1454 /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
1455 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
1456 pVM->cpum.s.GuestFeatures.fSysCall = 1;
1457 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
1458 break;
1459
1460 /*
1461 * Set the PAE bit in both feature masks.
1462 * Assumes the caller knows what it's doing! (host must support these)
1463 */
1464 case CPUMCPUIDFEATURE_PAE:
1465 if (!pVM->cpum.s.HostFeatures.fPae)
1466 {
1467 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
1468 return;
1469 }
1470
1471 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1472 if (pLeaf)
1473 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
1474
1475 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1476 if ( pLeaf
1477 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1478 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1479
1480 pVM->cpum.s.GuestFeatures.fPae = 1;
1481 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
1482 break;
1483
1484 /*
1485 * Set the LONG MODE bit in the extended feature mask.
1486 * Assumes the caller knows what it's doing! (host must support these)
1487 */
1488 case CPUMCPUIDFEATURE_LONG_MODE:
1489 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1490 if ( !pLeaf
1491 || !pVM->cpum.s.HostFeatures.fLongMode)
1492 {
1493 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
1494 return;
1495 }
1496
1497 /* Valid for both Intel and AMD. */
1498 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1499 pVM->cpum.s.GuestFeatures.fLongMode = 1;
1500 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
1501 break;
1502
1503 /*
1504 * Set the NX/XD bit in the extended feature mask.
1505 * Assumes the caller knows what it's doing! (host must support these)
1506 */
1507 case CPUMCPUIDFEATURE_NX:
1508 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1509 if ( !pLeaf
1510 || !pVM->cpum.s.HostFeatures.fNoExecute)
1511 {
1512 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
1513 return;
1514 }
1515
1516 /* Valid for both Intel and AMD. */
1517 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
1518 pVM->cpum.s.GuestFeatures.fNoExecute = 1;
1519 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
1520 break;
1521
1522
1523 /*
1524 * Set the LAHF/SAHF support in 64-bit mode.
1525 * Assumes the caller knows what it's doing! (host must support this)
1526 */
1527 case CPUMCPUIDFEATURE_LAHF:
1528 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1529 if ( !pLeaf
1530 || !pVM->cpum.s.HostFeatures.fLahfSahf)
1531 {
1532 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
1533 return;
1534 }
1535
1536 /* Valid for both Intel and AMD. */
1537 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1538 pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
1539 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1540 break;
1541
1542 /*
1543 * Set the page attribute table bit. This is an alternative page-level
1544 * cache control that doesn't matter much when everything is
1545 * virtualized, though it may when passing through device memory.
1546 */
1547 case CPUMCPUIDFEATURE_PAT:
1548 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1549 if (pLeaf)
1550 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
1551
1552 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1553 if ( pLeaf
1554 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1555 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1556
1557 pVM->cpum.s.GuestFeatures.fPat = 1;
1558 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
1559 break;
1560
1561 /*
1562 * Set the RDTSCP support bit.
1563 * Assumes the caller knows what it's doing! (host must support this)
1564 */
1565 case CPUMCPUIDFEATURE_RDTSCP:
1566 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1567 if ( !pLeaf
1568 || !pVM->cpum.s.HostFeatures.fRdTscP
1569 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1570 {
1571 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1572 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
1573 return;
1574 }
1575
1576 /* Valid for both Intel and AMD. */
1577 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1578 pVM->cpum.s.GuestFeatures.fRdTscP = 1;
1579 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1580 break;
1581
1582 /*
1583 * Set the Hypervisor Present bit in the standard feature mask.
1584 */
1585 case CPUMCPUIDFEATURE_HVP:
1586 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1587 if (pLeaf)
1588 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
1589 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
1590 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
1591 break;
1592
1593 /*
1594 * Set the MWAIT Extensions Present bit in the MWAIT/MONITOR leaf.
1595 * This currently includes the Present bit and MWAITBREAK bit as well.
1596 */
1597 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1598 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005));
1599 if ( !pLeaf
1600 || !pVM->cpum.s.HostFeatures.fMWaitExtensions)
1601 {
1602 LogRel(("CPUM: WARNING! Can't turn on MWAIT Extensions when the host doesn't support it!\n"));
1603 return;
1604 }
1605
1606 /* Valid for both Intel and AMD. */
1607 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
1608 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 1;
1609 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled MWAIT Extensions.\n"));
1610 break;
1611
1612 /*
1613 * OSXSAVE - only used from CPUMSetGuestCR4.
1614 */
1615 case CPUMCPUIDFEATURE_OSXSAVE:
1616 AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
1617
1618 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1619 AssertLogRelReturnVoid(pLeaf);
1620
1621 /* UNI: Special case for single CPU to make life simple for CPUMPatchHlpCpuId. */
1622 if (pVM->cCpus == 1)
1623 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_OSXSAVE;
1624 /* SMP: Set flag indicating OSXSAVE updating (superfluous because of the APIC ID, but that's fine). */
1625 else
1626 ASMAtomicOrU32(&pLeaf->fFlags, CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE);
1627 break;
1628
1629 default:
1630 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1631 break;
1632 }
1633
1634 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1635 {
1636 PVMCPU pVCpu = &pVM->aCpus[i];
1637 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1638 }
1639}
1640
1641
1642/**
1643 * Queries a CPUID feature bit.
1644 *
1645 * @returns boolean for feature presence
1646 * @param pVM Pointer to the VM.
1647 * @param enmFeature The feature to query.
1648 */
1649VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1650{
1651 switch (enmFeature)
1652 {
1653 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
1654 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
1655 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
1656 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
1657 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
1658 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
1659 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
1660 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
1661 case CPUMCPUIDFEATURE_PAT: return pVM->cpum.s.GuestFeatures.fPat;
1662 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
1663 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
1664 case CPUMCPUIDFEATURE_MWAIT_EXTS: return pVM->cpum.s.GuestFeatures.fMWaitExtensions;
1665
1666 case CPUMCPUIDFEATURE_OSXSAVE:
1667 case CPUMCPUIDFEATURE_INVALID:
1668 case CPUMCPUIDFEATURE_32BIT_HACK:
1669 break;
1670 }
1671 AssertFailed();
1672 return false;
1673}
1674
1675
1676/**
1677 * Clears a CPUID feature bit.
1678 *
1679 * @param pVM Pointer to the VM.
1680 * @param enmFeature The feature to clear.
1681 */
1682VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1683{
1684 PCPUMCPUIDLEAF pLeaf;
1685 switch (enmFeature)
1686 {
1687 case CPUMCPUIDFEATURE_APIC:
1688 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1689 if (pLeaf)
1690 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1691
1692 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1693 if ( pLeaf
1694 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1695 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1696
1697 pVM->cpum.s.GuestFeatures.fApic = 0;
1698 Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
1699 break;
1700
1701 case CPUMCPUIDFEATURE_X2APIC:
1702 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1703 if (pLeaf)
1704 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1705 pVM->cpum.s.GuestFeatures.fX2Apic = 0;
1706 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
1707 break;
1708
1709 case CPUMCPUIDFEATURE_PAE:
1710 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1711 if (pLeaf)
1712 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
1713
1714 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1715 if ( pLeaf
1716 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1717 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1718
1719 pVM->cpum.s.GuestFeatures.fPae = 0;
1720 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
1721 break;
1722
1723 case CPUMCPUIDFEATURE_PAT:
1724 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1725 if (pLeaf)
1726 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
1727
1728 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1729 if ( pLeaf
1730 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1731 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1732
1733 pVM->cpum.s.GuestFeatures.fPat = 0;
1734 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
1735 break;
1736
1737 case CPUMCPUIDFEATURE_LONG_MODE:
1738 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1739 if (pLeaf)
1740 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1741 pVM->cpum.s.GuestFeatures.fLongMode = 0;
1742 break;
1743
1744 case CPUMCPUIDFEATURE_LAHF:
1745 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1746 if (pLeaf)
1747 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1748 pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
1749 break;
1750
1751 case CPUMCPUIDFEATURE_RDTSCP:
1752 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1753 if (pLeaf)
1754 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1755 pVM->cpum.s.GuestFeatures.fRdTscP = 0;
1756 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
1757 break;
1758
1759 case CPUMCPUIDFEATURE_HVP:
1760 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1761 if (pLeaf)
1762 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
1763 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
1764 break;
1765
1766 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1767 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005));
1768 if (pLeaf)
1769 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
1770 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 0;
1771 Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
1772 break;
1773
1774 /*
1775 * OSXSAVE - only used from CPUMSetGuestCR4.
1776 */
1777 case CPUMCPUIDFEATURE_OSXSAVE:
1778 AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
1779
1780 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1781 AssertLogRelReturnVoid(pLeaf);
1782
1783 /* UNI: Special case for single CPU to make life easy for CPUMPatchHlpCpuId. */
1784 if (pVM->cCpus == 1)
1785 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_OSXSAVE;
1786 /* else: SMP: We never set the OSXSAVE bit and leaving the CONTAINS_OSXSAVE flag is fine. */
1787 break;
1788
1789
1790 default:
1791 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1792 break;
1793 }
1794
1795 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1796 {
1797 PVMCPU pVCpu = &pVM->aCpus[i];
1798 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1799 }
1800}
1801
1802
1803/**
1804 * Gets the host CPU vendor.
1805 *
1806 * @returns CPU vendor.
1807 * @param pVM Pointer to the VM.
1808 */
1809VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1810{
1811 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1812}
1813
1814
1815/**
1816 * Gets the CPU vendor.
1817 *
1818 * @returns CPU vendor.
1819 * @param pVM Pointer to the VM.
1820 */
1821VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1822{
1823 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1824}
1825
1826
1827VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1828{
1829 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1830 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1831}
1832
1833
1834VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1835{
1836 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1837 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1838}
1839
1840
1841VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1842{
1843 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1844 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1845}
1846
1847
1848VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1849{
1850 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1851 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1852}
1853
1854
1855VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1856{
1857 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1858 return VINF_SUCCESS; /* No need to recalc. */
1859}
1860
1861
1862VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1863{
1864 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1865 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1866}
1867
1868
1869VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1870{
1871 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1872 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1873 if (iReg == 4 || iReg == 5)
1874 iReg += 2;
1875 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1876 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1877}
1878
1879
1880/**
1881 * Recalculates the hypervisor DRx register values based on current guest
1882 * registers and DBGF breakpoints, updating changed registers depending on the
1883 * context.
1884 *
1885 * This is called whenever a guest DRx register is modified (any context) and
1886 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1887 *
1888 * In raw-mode context this function will reload any (hyper) DRx registers which
1889 * come out with a different value. It may also have to save the host debug
1890 * registers if that hasn't been done already. In this context though, we'll
1891 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1892 * are only important when breakpoints are actually enabled.
1893 *
1894 * In ring-0 (HM) context DR0-3 will be reloaded by us, while DR7 will be
1895 * reloaded by the HM code if it changes. Furthermore, we will only use the
1896 * combined register set when the VBox debugger is actually using hardware BPs;
1897 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1898 * concern us here).
1899 *
1900 * In ring-3 we won't be loading anything, so we'll calculate the hypervisor values
1901 * all the time.
1902 *
1903 * @returns VINF_SUCCESS.
1904 * @param pVCpu Pointer to the VMCPU.
1905 * @param iGstReg The guest debug register number that was modified.
1906 * UINT8_MAX if it is not a guest register.
1907 * @param fForceHyper Used in HM to force hyper registers because of single
1908 * stepping.
1909 */
1910VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1911{
1912 PVM pVM = pVCpu->CTX_SUFF(pVM);
1913
1914 /*
1915 * Compare the DR7s first.
1916 *
1917 * We only care about the enabled flags. GD is virtualized when we
1918 * dispatch the #DB, so we never enable it. The DBGF DR7 value will
1919 * always have the LE and GE bits set, so there is no need to check and disable
1920 * stuff if they're cleared like we have to for the guest DR7.
1921 */
1922 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1923 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1924 uGstDr7 = 0;
1925 else if (!(uGstDr7 & X86_DR7_LE))
1926 uGstDr7 &= ~X86_DR7_LE_ALL;
1927 else if (!(uGstDr7 & X86_DR7_GE))
1928 uGstDr7 &= ~X86_DR7_GE_ALL;
1929
1930 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1931
1932#ifdef IN_RING0
1933 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1934 fForceHyper = true;
1935#endif
1936 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
1937 {
1938 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1939#ifdef IN_RC
1940 bool const fHmEnabled = false;
1941#elif defined(IN_RING3)
1942 bool const fHmEnabled = HMIsEnabled(pVM);
1943#endif
1944
1945 /*
1946 * Ok, something is enabled. Recalc each of the breakpoints, taking
1947 * the VM debugger ones over the guest ones. In raw-mode context we will
1948 * not allow breakpoints with values inside the hypervisor area.
1949 */
1950 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1951
1952 /* bp 0 */
1953 RTGCUINTREG uNewDr0;
1954 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1955 {
1956 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1957 uNewDr0 = DBGFBpGetDR0(pVM);
1958 }
1959 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1960 {
1961 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1962#ifndef IN_RING0
1963 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1964 uNewDr0 = 0;
1965 else
1966#endif
1967 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1968 }
1969 else
1970 uNewDr0 = 0;
1971
1972 /* bp 1 */
1973 RTGCUINTREG uNewDr1;
1974 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1975 {
1976 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1977 uNewDr1 = DBGFBpGetDR1(pVM);
1978 }
1979 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1980 {
1981 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1982#ifndef IN_RING0
1983 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1984 uNewDr1 = 0;
1985 else
1986#endif
1987 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1988 }
1989 else
1990 uNewDr1 = 0;
1991
1992 /* bp 2 */
1993 RTGCUINTREG uNewDr2;
1994 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1995 {
1996 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1997 uNewDr2 = DBGFBpGetDR2(pVM);
1998 }
1999 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
2000 {
2001 uNewDr2 = CPUMGetGuestDR2(pVCpu);
2002#ifndef IN_RING0
2003 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
2004 uNewDr2 = 0;
2005 else
2006#endif
2007 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2008 }
2009 else
2010 uNewDr2 = 0;
2011
2012 /* bp 3 */
2013 RTGCUINTREG uNewDr3;
2014 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2015 {
2016 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2017 uNewDr3 = DBGFBpGetDR3(pVM);
2018 }
2019 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2020 {
2021 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2022#ifndef IN_RING0
2023 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
2024 uNewDr3 = 0;
2025 else
2026#endif
2027 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2028 }
2029 else
2030 uNewDr3 = 0;
2031
2032 /*
2033 * Apply the updates.
2034 */
2035#ifdef IN_RC
2036 /* Make sure to save host registers first. */
2037 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
2038 {
2039 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
2040 {
2041 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
2042 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
2043 }
2044 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
2045 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
2046 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
2047 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
2048 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
2049
2050 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
2051 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
2052 ASMSetDR0(uNewDr0);
2053 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
2054 ASMSetDR1(uNewDr1);
2055 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
2056 ASMSetDR2(uNewDr2);
2057 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
2058 ASMSetDR3(uNewDr3);
2059 ASMSetDR6(X86_DR6_INIT_VAL);
2060 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
2061 ASMSetDR7(uNewDr7);
2062 }
2063 else
2064#endif
2065 {
2066 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
2067 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2068 CPUMSetHyperDR3(pVCpu, uNewDr3);
2069 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2070 CPUMSetHyperDR2(pVCpu, uNewDr2);
2071 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2072 CPUMSetHyperDR1(pVCpu, uNewDr1);
2073 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2074 CPUMSetHyperDR0(pVCpu, uNewDr0);
2075 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2076 CPUMSetHyperDR7(pVCpu, uNewDr7);
2077 }
2078 }
2079#ifdef IN_RING0
2080 else if (CPUMIsGuestDebugStateActive(pVCpu))
2081 {
2082 /*
2083 * Reload the register that was modified. Normally this won't happen
2084 * as we won't intercept DRx writes when not having the hyper debug
2085 * state loaded, but in case we do for some reason we'll simply deal
2086 * with it.
2087 */
2088 switch (iGstReg)
2089 {
2090 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
2091 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
2092 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
2093 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
2094 default:
2095 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
2096 }
2097 }
2098#endif
2099 else
2100 {
2101 /*
2102 * No active debug state any more. In raw-mode this means we have to
2103 * make sure DR7 has everything disabled now, if we armed it already.
2104 * In ring-0 we might end up here when just single stepping.
2105 */
2106#if defined(IN_RC) || defined(IN_RING0)
2107 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
2108 {
2109# ifdef IN_RC
2110 ASMSetDR7(X86_DR7_INIT_VAL);
2111# endif
2112 if (pVCpu->cpum.s.Hyper.dr[0])
2113 ASMSetDR0(0);
2114 if (pVCpu->cpum.s.Hyper.dr[1])
2115 ASMSetDR1(0);
2116 if (pVCpu->cpum.s.Hyper.dr[2])
2117 ASMSetDR2(0);
2118 if (pVCpu->cpum.s.Hyper.dr[3])
2119 ASMSetDR3(0);
2120 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
2121 }
2122#endif
2123 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2124
2125 /* Clear all the registers. */
2126 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
2127 pVCpu->cpum.s.Hyper.dr[3] = 0;
2128 pVCpu->cpum.s.Hyper.dr[2] = 0;
2129 pVCpu->cpum.s.Hyper.dr[1] = 0;
2130 pVCpu->cpum.s.Hyper.dr[0] = 0;
2131
2132 }
2133 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2134 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2135 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2136 pVCpu->cpum.s.Hyper.dr[7]));
2137
2138 return VINF_SUCCESS;
2139}
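
/*
 * Illustrative sketch (example only, kept out of the build): how a caller that
 * isn't reacting to a guest DRx write would request a recalculation, e.g. after
 * DBGF has armed or disarmed a hardware breakpoint. The helper name below is
 * made up for the example; CPUMRecalcHyperDRx and UINT8_MAX usage are as
 * documented above.
 */
#if 0
static int exampleAfterDbgfBpChange(PVMCPU pVCpu)
{
    /* No guest debug register changed, hence iGstReg = UINT8_MAX; the DBGF
       DR7 image is picked up via DBGFBpGetDR7() inside the recalculation. */
    return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /*iGstReg*/, false /*fForceHyper*/);
}
#endif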
2140
2141
2142/**
2143 * Set the guest XCR0 register.
2144 *
2145 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
2146 * value.
2147 * @param pVCpu Pointer to the cross context VMCPU structure for the
2148 * calling EMT.
2149 * @param uNewValue The new value.
2150 * @thread EMT(pVCpu)
2151 */
2152VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
2153{
2154 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
2155 /* The X87 bit cannot be cleared. */
2156 && (uNewValue & XSAVE_C_X87)
2157 /* AVX requires SSE. */
2158 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
2159 /* AVX-512 requires SSE and YMM; its three components must be either all enabled or all disabled. */
2160 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
2161 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
2162 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
2163 )
2164 {
2165 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
2166 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
2167 return VINF_SUCCESS;
2168 }
2169 return VERR_CPUM_RAISE_GP_0;
2170}
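
/*
 * Illustrative sketch (example only, kept out of the build): XCR0 values the
 * checks above accept or reject, assuming the guest XSAVE mask permits the
 * components involved. The XSAVE_C_* constants are the architectural XCR0
 * bits already used above; the helper name is made up for the example.
 */
#if 0
static void exampleXcr0Checks(PVMCPU pVCpu)
{
    /* Accepted combinations: */
    AssertRC(CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87));
    AssertRC(CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM));
    /* Rejected combinations (VERR_CPUM_RAISE_GP_0): */
    Assert(CPUMSetGuestXcr0(pVCpu, XSAVE_C_SSE) == VERR_CPUM_RAISE_GP_0);               /* X87 cleared. */
    Assert(CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_YMM) == VERR_CPUM_RAISE_GP_0); /* YMM without SSE. */
    Assert(   CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK)
           == VERR_CPUM_RAISE_GP_0);                                                    /* Partial AVX-512. */
}
#endif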
2171
2172
2173/**
2174 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2175 *
2176 * @returns true if NXE (EFER.NXE) is enabled, otherwise false.
2177 * @param pVCpu Pointer to the VMCPU.
2178 */
2179VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2180{
2181 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2182}
2183
2184
2185/**
2186 * Tests if the guest has the Page Size Extension enabled (PSE).
2187 *
2188 * @returns true if PSE (or PAE) is enabled, otherwise false.
2189 * @param pVCpu Pointer to the VMCPU.
2190 */
2191VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2192{
2193 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2194 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2195}
2196
2197
2198/**
2199 * Tests if the guest has the paging enabled (PG).
2200 *
2201 * @returns true if paging is enabled, otherwise false.
2202 * @param pVCpu Pointer to the VMCPU.
2203 */
2204VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2205{
2206 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2207}
2208
2209
2210/**
2211 * Tests if the guest has supervisor-mode write protection enabled (CR0.WP).
2212 *
2213 * @returns true if CR0.WP is set, otherwise false.
2214 * @param pVCpu Pointer to the VMCPU.
2215 */
2216VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2217{
2218 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2219}
2220
2221
2222/**
2223 * Tests if the guest is running in real mode or not.
2224 *
2225 * @returns true if in real mode, otherwise false.
2226 * @param pVCpu Pointer to the VMCPU.
2227 */
2228VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2229{
2230 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2231}
2232
2233
2234/**
2235 * Tests if the guest is running in real or virtual 8086 mode.
2236 *
2237 * @returns @c true if it is, @c false if not.
2238 * @param pVCpu Pointer to the VMCPU.
2239 */
2240VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2241{
2242 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2243 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2244}
2245
2246
2247/**
2248 * Tests if the guest is running in protected mode or not.
2249 *
2250 * @returns true if in protected mode, otherwise false.
2251 * @param pVCpu Pointer to the VMCPU.
2252 */
2253VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2254{
2255 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2256}
2257
2258
2259/**
2260 * Tests if the guest is running in paged protected mode or not.
2261 *
2262 * @returns true if in paged protected mode, otherwise false.
2263 * @param pVCpu Pointer to the VMCPU.
2264 */
2265VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2266{
2267 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2268}
2269
2270
2271/**
2272 * Tests if the guest is running in long mode or not.
2273 *
2274 * @returns true if in long mode, otherwise false.
2275 * @param pVCpu Pointer to the VMCPU.
2276 */
2277VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2278{
2279 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2280}
2281
2282
2283/**
2284 * Tests if the guest is running in PAE mode or not.
2285 *
2286 * @returns true if in PAE mode, otherwise false.
2287 * @param pVCpu Pointer to the VMCPU.
2288 */
2289VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2290{
2291 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
2292 than EFER.LME as it reflects whether the CPU has entered paging with EFER.LME set. */
2293 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2294 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2295 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2296}
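
/*
 * Summary of the architectural decision table behind the CR0.PG, CR4.PAE and
 * EFER.LMA tests used above; PGMGetGuestMode() remains the authoritative
 * source for the guest paging mode, this is just a reference sketch:
 *
 *   CR0.PG=0                          -> no paging
 *   CR0.PG=1, CR4.PAE=0               -> 32-bit (legacy) paging
 *   CR0.PG=1, CR4.PAE=1, EFER.LMA=0   -> PAE paging (this predicate)
 *   CR0.PG=1, CR4.PAE=1, EFER.LMA=1   -> long-mode paging
 */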
2297
2298
2299/**
2300 * Tests if the guest is running in 64-bit mode or not.
2301 *
2302 * @returns true if in 64-bit protected mode, otherwise false.
2303 * @param pVCpu The current virtual CPU.
2304 */
2305VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2306{
2307 if (!CPUMIsGuestInLongMode(pVCpu))
2308 return false;
2309 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2310 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2311}
2312
2313
2314/**
2315 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2316 * registers.
2317 *
2318 * @returns true if in 64-bit protected mode, otherwise false.
2319 * @param pCtx Pointer to the current guest CPU context.
2320 */
2321VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2322{
2323 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2324}
2325
2326#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2327
2328/**
2329 * Checks whether we have entered raw-mode (see CPUMRawEnter).
2330 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2331 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2332 * @param pVCpu The current virtual CPU.
2333 */
2334VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2335{
2336 return pVCpu->cpum.s.fRawEntered;
2337}
2338
2339/**
2340 * Transforms the guest CPU state to raw-ring mode.
2341 *
2342 * This function will change the CS and SS selector RPLs from 0 to 1.
2343 *
2344 * @returns VBox status. (recompiler failure)
2345 * @param pVCpu Pointer to the VMCPU.
2346 * @see @ref pg_raw
2347 */
2348VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2349{
2350 PVM pVM = pVCpu->CTX_SUFF(pVM);
2351
2352 Assert(!pVCpu->cpum.s.fRawEntered);
2353 Assert(!pVCpu->cpum.s.fRemEntered);
2354 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2355
2356 /*
2357 * Are we in Ring-0?
2358 */
2359 if ( pCtx->ss.Sel
2360 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2361 && !pCtx->eflags.Bits.u1VM)
2362 {
2363 /*
2364 * Enter execution mode.
2365 */
2366 PATMRawEnter(pVM, pCtx);
2367
2368 /*
2369 * Set CPL to Ring-1.
2370 */
2371 pCtx->ss.Sel |= 1;
2372 if ( pCtx->cs.Sel
2373 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2374 pCtx->cs.Sel |= 1;
2375 }
2376 else
2377 {
2378# ifdef VBOX_WITH_RAW_RING1
2379 if ( EMIsRawRing1Enabled(pVM)
2380 && !pCtx->eflags.Bits.u1VM
2381 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2382 {
2383 /* Set CPL to Ring-2. */
2384 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2385 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2386 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2387 }
2388# else
2389 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2390 ("ring-1 code not supported\n"));
2391# endif
2392 /*
2393 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2394 */
2395 PATMRawEnter(pVM, pCtx);
2396 }
2397
2398 /*
2399 * Assert sanity.
2400 */
2401 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2402 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2403 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2404 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
2405
2406 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2407
2408 pVCpu->cpum.s.fRawEntered = true;
2409 return VINF_SUCCESS;
2410}
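
/*
 * Illustrative sketch (example only, kept out of the build): the selector RPL
 * adjustment performed here and undone again by CPUMRawLeave below. The
 * selector values and the helper name are made up for the example.
 */
#if 0
static void exampleRawModeRplAdjustment(void)
{
    /* Guest kernel selectors before raw-mode execution (RPL=0): */
    RTSEL const SelCs = 0x0008, SelSs = 0x0010;
    /* CPUMRawEnter ORs in RPL=1 so ring-0 guest code runs in ring-1: */
    Assert((SelCs | 1) == 0x0009 && ((SelCs | 1) & X86_SEL_RPL) == 1);
    Assert((SelSs | 1) == 0x0011 && ((SelSs | 1) & X86_SEL_RPL) == 1);
    /* CPUMRawLeave masks the RPL back off on the way out. */
}
#endif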
2411
2412
2413/**
2414 * Transforms the guest CPU state from raw-ring mode to correct values.
2415 *
2416 * This function will change any selector registers with RPL=1 back to RPL=0.
2417 *
2418 * @returns Adjusted rc.
2419 * @param pVCpu Pointer to the VMCPU.
2420 * @param rc Raw mode return code
2421 * @see @ref pg_raw
2422 */
2423VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2424{
2425 PVM pVM = pVCpu->CTX_SUFF(pVM);
2426
2427 /*
2428 * Don't leave if we've already left (in RC).
2429 */
2430 Assert(!pVCpu->cpum.s.fRemEntered);
2431 if (!pVCpu->cpum.s.fRawEntered)
2432 return rc;
2433 pVCpu->cpum.s.fRawEntered = false;
2434
2435 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2436 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2437 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2438 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2439
2440 /*
2441 * Are we executing in raw ring-1?
2442 */
2443 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2444 && !pCtx->eflags.Bits.u1VM)
2445 {
2446 /*
2447 * Leave execution mode.
2448 */
2449 PATMRawLeave(pVM, pCtx, rc);
2450 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2451 /** @todo See what happens if we remove this. */
2452 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2453 pCtx->ds.Sel &= ~X86_SEL_RPL;
2454 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2455 pCtx->es.Sel &= ~X86_SEL_RPL;
2456 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2457 pCtx->fs.Sel &= ~X86_SEL_RPL;
2458 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2459 pCtx->gs.Sel &= ~X86_SEL_RPL;
2460
2461 /*
2462 * Ring-1 selector => Ring-0.
2463 */
2464 pCtx->ss.Sel &= ~X86_SEL_RPL;
2465 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2466 pCtx->cs.Sel &= ~X86_SEL_RPL;
2467 }
2468 else
2469 {
2470 /*
2471 * PATM is taking care of the IOPL and IF flags for us.
2472 */
2473 PATMRawLeave(pVM, pCtx, rc);
2474 if (!pCtx->eflags.Bits.u1VM)
2475 {
2476# ifdef VBOX_WITH_RAW_RING1
2477 if ( EMIsRawRing1Enabled(pVM)
2478 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2479 {
2480 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2481 /** @todo See what happens if we remove this. */
2482 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2483 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2484 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2485 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2486 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2487 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2488 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2489 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2490
2491 /*
2492 * Ring-2 selector => Ring-1.
2493 */
2494 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2495 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2496 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2497 }
2498 else
2499 {
2500# endif
2501 /** @todo See what happens if we remove this. */
2502 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2503 pCtx->ds.Sel &= ~X86_SEL_RPL;
2504 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2505 pCtx->es.Sel &= ~X86_SEL_RPL;
2506 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2507 pCtx->fs.Sel &= ~X86_SEL_RPL;
2508 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2509 pCtx->gs.Sel &= ~X86_SEL_RPL;
2510# ifdef VBOX_WITH_RAW_RING1
2511 }
2512# endif
2513 }
2514 }
2515
2516 return rc;
2517}
2518
2519#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2520
2521/**
2522 * Updates the EFLAGS while we're in raw-mode.
2523 *
2524 * @param pVCpu Pointer to the VMCPU.
2525 * @param fEfl The new EFLAGS value.
2526 */
2527VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2528{
2529#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2530 if (pVCpu->cpum.s.fRawEntered)
2531 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2532 else
2533#endif
2534 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2535}
2536
2537
2538/**
2539 * Gets the EFLAGS while we're in raw-mode.
2540 *
2541 * @returns The eflags.
2542 * @param pVCpu Pointer to the current virtual CPU.
2543 */
2544VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2545{
2546#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2547 if (pVCpu->cpum.s.fRawEntered)
2548 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2549#endif
2550 return pVCpu->cpum.s.Guest.eflags.u32;
2551}
2552
2553
2554/**
2555 * Sets the specified changed flags (CPUM_CHANGED_*).
2556 *
2557 * @param pVCpu Pointer to the current virtual CPU.
 * @param fChangedFlags The changed flags (CPUM_CHANGED_XXX).
2558 */
2559VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2560{
2561 pVCpu->cpum.s.fChanged |= fChangedFlags;
2562}
2563
2564
2565/**
2566 * Checks if the CPU supports the XSAVE and XRSTOR instructions.
2567 *
2568 * @returns true if supported.
2569 * @returns false if not supported.
2570 * @param pVM Pointer to the VM.
2571 */
2572VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2573{
2574 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2575}
2576
2577
2578/**
2579 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2580 * @returns true if used.
2581 * @returns false if not used.
2582 * @param pVM Pointer to the VM.
2583 */
2584VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2585{
2586 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2587}
2588
2589
2590/**
2591 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2592 * @returns true if used.
2593 * @returns false if not used.
2594 * @param pVM Pointer to the VM.
2595 */
2596VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2597{
2598 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2599}
2600
2601#ifdef IN_RC
2602
2603/**
2604 * Lazily sync in the FPU/XMM state.
2605 *
2606 * @returns VBox status code.
2607 * @param pVCpu Pointer to the VMCPU.
2608 */
2609VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2610{
2611 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2612}
2613
2614#endif /* IN_RC */
2615
2616/**
2617 * Checks if we activated the FPU/XMM state of the guest OS.
2618 * @returns true if we did.
2619 * @returns false if not.
2620 * @param pVCpu Pointer to the VMCPU.
2621 */
2622VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2623{
2624 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
2625}
2626
2627
2628/**
2629 * Deactivate the FPU/XMM state of the guest OS.
2630 * @param pVCpu Pointer to the VMCPU.
2631 *
2632 * @todo r=bird: Why is this needed? Looks like a workaround for mishandled
2633 * FPU state management.
2634 */
2635VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2636{
2637 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU));
2638 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2639}
2640
2641
2642/**
2643 * Checks if the guest debug state is active.
2644 *
2645 * @returns boolean
2646 * @param pVCpu Pointer to the VMCPU.
2647 */
2648VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2649{
2650 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2651}
2652
2653
2654/**
2655 * Checks if the guest debug state is to be made active during the world-switch
2656 * (currently only used for the 32->64 switcher case).
2657 *
2658 * @returns boolean
2659 * @param pVCpu Pointer to the VMCPU.
2660 */
2661VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2662{
2663 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2664}
2665
2666
2667/**
2668 * Checks if the hyper debug state is active.
2669 *
2670 * @returns boolean
2671 * @param pVCpu Pointer to the VMCPU.
2672 */
2673VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2674{
2675 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2676}
2677
2678
2679/**
2680 * Checks if the hyper debug state is to be made active during the world-switch
2681 * (currently only used for the 32->64 switcher case).
2682 *
2683 * @returns boolean
2684 * @param pVCpu Pointer to the VMCPU.
2685 */
2686VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2687{
2688 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2689}
2690
2691
2692/**
2693 * Mark the guest's debug state as inactive.
2694 *
2695 * @param pVCpu Pointer to the VMCPU.
2696 *
2697 * @todo This API doesn't make sense any more.
2698 */
2699VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2700{
2701 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2702}
2703
2704
2705/**
2706 * Get the current privilege level of the guest.
2707 *
2708 * @returns CPL
2709 * @param pVCpu Pointer to the current virtual CPU.
2710 */
2711VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2712{
2713 /*
2714 * CPL can reliably be found in SS.DPL (hidden regs valid) or in SS.RPL if not.
2715 *
2716 * Note! We used to check CS.DPL here, assuming it was always equal to
2717 * CPL even if a conforming segment was loaded. But this turned out to
2718 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2719 * during install after a far call to ring 2 with VT-x. Then on newer
2720 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2721 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2722 *
2723 * So, forget CS.DPL, always use SS.DPL.
2724 *
2725 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2726 * isn't necessarily equal if the segment is conforming.
2727 * See section 4.11.1 in the AMD manual.
2728 *
2729 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2730 * right after real->prot mode switch and when in V8086 mode? That
2731 * section says the RPL specified in a direct transfer (call, jmp,
2732 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2733 * it would be impossible for an exception handler or the iret
2734 * instruction to figure out whether SS:ESP are part of the frame
2735 * or not. A VBox or qemu bug must've led to this misconception.
2736 *
2737 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2738 * selector into SS with an RPL other than the CPL when CPL != 3 and
2739 * we're in 64-bit mode. The Intel dev box doesn't allow this and insists
2740 * on RPL = CPL. Weird.
2741 */
2742 uint32_t uCpl;
2743 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2744 {
2745 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2746 {
2747 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2748 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2749 else
2750 {
2751 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2752#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2753# ifdef VBOX_WITH_RAW_RING1
2754 if (pVCpu->cpum.s.fRawEntered)
2755 {
2756 if ( uCpl == 2
2757 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2758 uCpl = 1;
2759 else if (uCpl == 1)
2760 uCpl = 0;
2761 }
2762 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2763# else
2764 if (uCpl == 1)
2765 uCpl = 0;
2766# endif
2767#endif
2768 }
2769 }
2770 else
2771 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2772 }
2773 else
2774 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2775 return uCpl;
2776}
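
/*
 * Illustrative sketch (example only, kept out of the build): a trivial use of
 * CPUMGetGuestCPL. Real mode reports CPL 0, V8086 mode reports CPL 3, and
 * otherwise SS.DPL (or the SS RPL with the raw-mode adjustments above) is
 * returned. The helper name is made up for the example.
 */
#if 0
static bool exampleIsGuestInSupervisorMode(PVMCPU pVCpu)
{
    /* Ring 0 is the only supervisor ring we care about here. */
    return CPUMGetGuestCPL(pVCpu) == 0;
}
#endif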
2777
2778
2779/**
2780 * Gets the current guest CPU mode.
2781 *
2782 * If paging mode is what you need, check out PGMGetGuestMode().
2783 *
2784 * @returns The CPU mode.
2785 * @param pVCpu Pointer to the VMCPU.
2786 */
2787VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2788{
2789 CPUMMODE enmMode;
2790 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2791 enmMode = CPUMMODE_REAL;
2792 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2793 enmMode = CPUMMODE_PROTECTED;
2794 else
2795 enmMode = CPUMMODE_LONG;
2796
2797 return enmMode;
2798}
2799
2800
2801/**
2802 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
2803 *
2804 * @returns 16, 32 or 64.
2805 * @param pVCpu The current virtual CPU.
2806 */
2807VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2808{
2809 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2810 return 16;
2811
2812 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2813 {
2814 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2815 return 16;
2816 }
2817
2818 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2819 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2820 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2821 return 64;
2822
2823 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2824 return 32;
2825
2826 return 16;
2827}
2828
2829
2830VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2831{
2832 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2833 return DISCPUMODE_16BIT;
2834
2835 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2836 {
2837 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2838 return DISCPUMODE_16BIT;
2839 }
2840
2841 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2842 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2843 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2844 return DISCPUMODE_64BIT;
2845
2846 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2847 return DISCPUMODE_32BIT;
2848
2849 return DISCPUMODE_16BIT;
2850}
2851