VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 58126

Last change on this file since 58126 was 58126, checked in by vboxsync, 9 years ago

VMM: Fixed almost all the Doxygen warnings.

1/* $Id: CPUMAllRegs.cpp 58126 2015-10-08 20:59:48Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Make sure stack frame pointers are generated here (turns off MSVC frame pointer omission). */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
53AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
54
55
56/*********************************************************************************************************************************
57* Defined Constants And Macros *
58*********************************************************************************************************************************/
59/**
60 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
61 *
62 * @returns Pointer to the Virtual CPU.
63 * @param a_pGuestCtx Pointer to the guest context.
64 */
65#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
66
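Illustrative aside (editorial, not part of the original file): RT_FROM_MEMBER recovers the address of the containing structure from the address of one of its members, the classic container-of idiom. A minimal stand-alone version with a hypothetical structure might look like this:

    #include <stddef.h>

    typedef struct EXAMPLECPU
    {
        int iOther;
        int GuestCtx;   /* stands in for cpum.s.Guest */
    } EXAMPLECPU;

    /* Same idea as RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest). */
    #define EXAMPLE_FROM_MEMBER(pMember) \
        ((EXAMPLECPU *)((char *)(pMember) - offsetof(EXAMPLECPU, GuestCtx)))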
67/**
68 * Lazily loads the hidden parts of a selector register when using raw-mode.
69 */
70#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
71# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
72 do \
73 { \
74 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
75 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
76 } while (0)
77#else
78# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
79 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
80#endif
81
82
83
84#ifdef VBOX_WITH_RAW_MODE_NOT_R0
85
86/**
87 * Does the lazy hidden selector register loading.
88 *
89 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
90 * @param pSReg The selector register to lazily load hidden parts of.
91 */
92static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
93{
94 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
95 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
96 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
97
98 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
99 {
100 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
101 pSReg->Attr.u = 0;
102 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
103 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
104 pSReg->Attr.n.u2Dpl = 3;
105 pSReg->Attr.n.u1Present = 1;
106 pSReg->u32Limit = 0x0000ffff;
107 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
108 pSReg->ValidSel = pSReg->Sel;
109 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
110 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
111 }
112 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
113 {
114 /* Real mode - leave the limit and flags alone here, at least for now. */
115 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
116 pSReg->ValidSel = pSReg->Sel;
117 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
118 }
119 else
120 {
121 /* Protected mode - get it from the selector descriptor tables. */
122 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
123 {
124 Assert(!CPUMIsGuestInLongMode(pVCpu));
125 pSReg->Sel = 0;
126 pSReg->u64Base = 0;
127 pSReg->u32Limit = 0;
128 pSReg->Attr.u = 0;
129 pSReg->ValidSel = 0;
130 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
131 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
132 }
133 else
134 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
135 }
136}
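Worked example (editorial): in V8086 and real mode the hidden base computed above is simply the 16-bit selector shifted left by four, so a selector of 0x1234 gives a base of 0x12340 (and, with the 0xFFFF limit used in V8086 mode, the segment spans 0x12340..0x2233F). A tiny stand-alone sketch with a hypothetical helper name:

    static uint32_t exampleRealModeSegBase(uint16_t uSel)
    {
        return (uint32_t)uSel << 4;   /* e.g. 0x1234 -> 0x00012340 */
    }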
137
138
139/**
140 * Makes sure the hidden CS and SS selector registers are valid, loading them if
141 * necessary.
142 *
143 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
144 */
145VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
146{
147 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
148 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
149}
150
151
152/**
153 * Loads the hidden parts of a selector register.
154 *
155 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
156 */
157VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
158{
159 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
160}
161
162#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
163
164
165/**
166 * Obsolete.
167 *
168 * We don't support nested hypervisor context interrupts or traps. Life is much
169 * simpler when we don't. It's also slightly faster at times.
170 *
171 * @param pVCpu The cross context virtual CPU structure.
172 */
173VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
174{
175 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
176}
177
178
179/**
180 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
181 *
182 * @param pVCpu The cross context virtual CPU structure.
183 */
184VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
185{
186 return &pVCpu->cpum.s.Hyper;
187}
188
189
190VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
191{
192 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
193 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
194}
195
196
197VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
198{
199 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
200 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
201}
202
203
204VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
205{
206 pVCpu->cpum.s.Hyper.cr3 = cr3;
207
208#ifdef IN_RC
209 /* Update the current CR3. */
210 ASMSetCR3(cr3);
211#endif
212}
213
214VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
215{
216 return pVCpu->cpum.s.Hyper.cr3;
217}
218
219
220VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
221{
222 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
223}
224
225
226VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
227{
228 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
229}
230
231
232VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
233{
234 pVCpu->cpum.s.Hyper.es.Sel = SelES;
235}
236
237
238VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
239{
240 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
241}
242
243
244VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
245{
246 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
247}
248
249
250VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
251{
252 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
253}
254
255
256VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
257{
258 pVCpu->cpum.s.Hyper.esp = u32ESP;
259}
260
261
262VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
263{
264 pVCpu->cpum.s.Hyper.edx = u32EDX;
265}
266
267
268VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
269{
270 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
271 return VINF_SUCCESS;
272}
273
274
275VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
276{
277 pVCpu->cpum.s.Hyper.eip = u32EIP;
278}
279
280
281/**
282 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
283 * EFLAGS and EIP prior to resuming guest execution.
284 *
285 * All general registers not given as parameters will be set to 0. The EFLAGS
286 * register will be set to sane values for C/C++ code execution with interrupts
287 * disabled and IOPL 0.
288 *
289 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
290 * @param u32EIP The EIP value.
291 * @param u32ESP The ESP value.
292 * @param u32EAX The EAX value.
293 * @param u32EDX The EDX value.
294 */
295VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
296{
297 pVCpu->cpum.s.Hyper.eip = u32EIP;
298 pVCpu->cpum.s.Hyper.esp = u32ESP;
299 pVCpu->cpum.s.Hyper.eax = u32EAX;
300 pVCpu->cpum.s.Hyper.edx = u32EDX;
301 pVCpu->cpum.s.Hyper.ecx = 0;
302 pVCpu->cpum.s.Hyper.ebx = 0;
303 pVCpu->cpum.s.Hyper.ebp = 0;
304 pVCpu->cpum.s.Hyper.esi = 0;
305 pVCpu->cpum.s.Hyper.edi = 0;
306 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
307}
308
309
310VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
311{
312 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
313}
314
315
316VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
317{
318 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
319}
320
321
322/** @def MAYBE_LOAD_DRx
323 * Macro for updating DRx values in raw-mode and ring-0 contexts.
324 */
325#ifdef IN_RING0
326# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
327# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
328 do { \
329 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
330 a_fnLoad(a_uValue); \
331 else \
332 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
333 } while (0)
334# else
335# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
336 do { \
337 a_fnLoad(a_uValue); \
338 } while (0)
339# endif
340
341#elif defined(IN_RC)
342# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
343 do { \
344 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
345 { a_fnLoad(a_uValue); } \
346 } while (0)
347
348#else
349# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
350#endif
351
352VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
353{
354 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
355 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
356}
357
358
359VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
360{
361 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
362 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
363}
364
365
366VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
367{
368 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
369 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
370}
371
372
373VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
374{
375 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
376 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
377}
378
379
380VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
381{
382 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
383}
384
385
386VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
387{
388 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
389#ifdef IN_RC
390 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
391#endif
392}
393
394
395VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
396{
397 return pVCpu->cpum.s.Hyper.cs.Sel;
398}
399
400
401VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
402{
403 return pVCpu->cpum.s.Hyper.ds.Sel;
404}
405
406
407VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
408{
409 return pVCpu->cpum.s.Hyper.es.Sel;
410}
411
412
413VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
414{
415 return pVCpu->cpum.s.Hyper.fs.Sel;
416}
417
418
419VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
420{
421 return pVCpu->cpum.s.Hyper.gs.Sel;
422}
423
424
425VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
426{
427 return pVCpu->cpum.s.Hyper.ss.Sel;
428}
429
430
431VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
432{
433 return pVCpu->cpum.s.Hyper.eax;
434}
435
436
437VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
438{
439 return pVCpu->cpum.s.Hyper.ebx;
440}
441
442
443VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
444{
445 return pVCpu->cpum.s.Hyper.ecx;
446}
447
448
449VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
450{
451 return pVCpu->cpum.s.Hyper.edx;
452}
453
454
455VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
456{
457 return pVCpu->cpum.s.Hyper.esi;
458}
459
460
461VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
462{
463 return pVCpu->cpum.s.Hyper.edi;
464}
465
466
467VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
468{
469 return pVCpu->cpum.s.Hyper.ebp;
470}
471
472
473VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
474{
475 return pVCpu->cpum.s.Hyper.esp;
476}
477
478
479VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
480{
481 return pVCpu->cpum.s.Hyper.eflags.u32;
482}
483
484
485VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
486{
487 return pVCpu->cpum.s.Hyper.eip;
488}
489
490
491VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
492{
493 return pVCpu->cpum.s.Hyper.rip;
494}
495
496
497VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
498{
499 if (pcbLimit)
500 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
501 return pVCpu->cpum.s.Hyper.idtr.pIdt;
502}
503
504
505VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
506{
507 if (pcbLimit)
508 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
509 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
510}
511
512
513VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
514{
515 return pVCpu->cpum.s.Hyper.ldtr.Sel;
516}
517
518
519VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
520{
521 return pVCpu->cpum.s.Hyper.dr[0];
522}
523
524
525VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
526{
527 return pVCpu->cpum.s.Hyper.dr[1];
528}
529
530
531VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
532{
533 return pVCpu->cpum.s.Hyper.dr[2];
534}
535
536
537VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
538{
539 return pVCpu->cpum.s.Hyper.dr[3];
540}
541
542
543VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
544{
545 return pVCpu->cpum.s.Hyper.dr[6];
546}
547
548
549VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
550{
551 return pVCpu->cpum.s.Hyper.dr[7];
552}
553
554
555/**
556 * Gets the pointer to the internal CPUMCTXCORE structure.
557 * This is only for reading in order to save a few calls.
558 *
559 * @param pVCpu The cross context virtual CPU structure.
560 */
561VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
562{
563 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
564}
565
566
567/**
568 * Queries the pointer to the internal CPUMCTX structure.
569 *
570 * @returns The CPUMCTX pointer.
571 * @param pVCpu The cross context virtual CPU structure.
572 */
573VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
574{
575 return &pVCpu->cpum.s.Guest;
576}
577
578VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
579{
580#ifdef VBOX_WITH_IEM
581# ifdef VBOX_WITH_RAW_MODE_NOT_R0
582 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
583 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
584# endif
585#endif
586 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
587 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
588 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
589 return VINF_SUCCESS; /* formality, consider it void. */
590}
591
592VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
593{
594#ifdef VBOX_WITH_IEM
595# ifdef VBOX_WITH_RAW_MODE_NOT_R0
596 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
597 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
598# endif
599#endif
600 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
601 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
602 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
603 return VINF_SUCCESS; /* formality, consider it void. */
604}
605
606VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
607{
608#ifdef VBOX_WITH_IEM
609# ifdef VBOX_WITH_RAW_MODE_NOT_R0
610 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
611 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
612# endif
613#endif
614 pVCpu->cpum.s.Guest.tr.Sel = tr;
615 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
616 return VINF_SUCCESS; /* formality, consider it void. */
617}
618
619VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
620{
621#ifdef VBOX_WITH_IEM
622# ifdef VBOX_WITH_RAW_MODE_NOT_R0
623 if ( ( ldtr != 0
624 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
625 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
626 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
627# endif
628#endif
629 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
630 /* The caller will set more hidden bits if it has them. */
631 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
632 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
633 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
634 return VINF_SUCCESS; /* formality, consider it void. */
635}
636
637
638/**
639 * Set the guest CR0.
640 *
641 * When called in GC, the hyper CR0 may be updated if that is
642 * required. The caller only has to take special action if AM,
643 * WP, PG or PE changes.
644 *
645 * @returns VINF_SUCCESS (consider it void).
646 * @param pVCpu The cross context virtual CPU structure.
647 * @param cr0 The new CR0 value.
648 */
649VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
650{
651#ifdef IN_RC
652 /*
653 * Check if we need to change hypervisor CR0 because
654 * of math stuff.
655 */
656 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
657 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
658 {
659 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
660 {
661 /*
662 * We haven't saved the host FPU state yet, so TS and MT are both set
663 * and EM should be reflecting the guest EM (it always does this).
664 */
665 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
666 {
667 uint32_t HyperCR0 = ASMGetCR0();
668 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
669 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
670 HyperCR0 &= ~X86_CR0_EM;
671 HyperCR0 |= cr0 & X86_CR0_EM;
672 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
673 ASMSetCR0(HyperCR0);
674 }
675# ifdef VBOX_STRICT
676 else
677 {
678 uint32_t HyperCR0 = ASMGetCR0();
679 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
680 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
681 }
682# endif
683 }
684 else
685 {
686 /*
687 * Already saved the state, so we're just mirroring
688 * the guest flags.
689 */
690 uint32_t HyperCR0 = ASMGetCR0();
691 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
692 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
693 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
694 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
695 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
696 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
697 ASMSetCR0(HyperCR0);
698 }
699 }
700#endif /* IN_RC */
701
702 /*
703 * Check for changes causing TLB flushes (for REM).
704 * The caller is responsible for calling PGM when appropriate.
705 */
706 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
707 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
708 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
709 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
710
711 /*
712 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
713 */
714 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
715 PGMCr0WpEnabled(pVCpu);
716
717 pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
718 return VINF_SUCCESS;
719}
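The raw-mode branches above mirror selected CR0 bits (TS, EM, MP) from the guest value into the CR0 actually loaded on the host CPU using the usual clear-then-or merge. A stand-alone sketch of that pattern (editorial, hypothetical helper name):

    /* Copies the bits selected by fMask from uGuest into uHost, leaving the rest of uHost untouched. */
    static uint32_t exampleMergeBits(uint32_t uHost, uint32_t uGuest, uint32_t fMask)
    {
        return (uHost & ~fMask) | (uGuest & fMask);
    }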
720
721
722VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
723{
724 pVCpu->cpum.s.Guest.cr2 = cr2;
725 return VINF_SUCCESS;
726}
727
728
729VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
730{
731 pVCpu->cpum.s.Guest.cr3 = cr3;
732 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
733 return VINF_SUCCESS;
734}
735
736
737VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
738{
739 /*
740 * The CR4.OSXSAVE bit is reflected in CPUID(1).ECX[27].
741 */
742 if ( (cr4 & X86_CR4_OSXSAVE)
743 != (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE) )
744 {
745 PVM pVM = pVCpu->CTX_SUFF(pVM);
746 if (cr4 & X86_CR4_OSXSAVE)
747 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
748 else
749 CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
750 }
751
752 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
753 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
754 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
755
756 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
757 pVCpu->cpum.s.Guest.cr4 = cr4;
758 return VINF_SUCCESS;
759}
760
761
762VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
763{
764 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
770{
771 pVCpu->cpum.s.Guest.eip = eip;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
777{
778 pVCpu->cpum.s.Guest.eax = eax;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
784{
785 pVCpu->cpum.s.Guest.ebx = ebx;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
791{
792 pVCpu->cpum.s.Guest.ecx = ecx;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
798{
799 pVCpu->cpum.s.Guest.edx = edx;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
805{
806 pVCpu->cpum.s.Guest.esp = esp;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
812{
813 pVCpu->cpum.s.Guest.ebp = ebp;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
819{
820 pVCpu->cpum.s.Guest.esi = esi;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
826{
827 pVCpu->cpum.s.Guest.edi = edi;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
833{
834 pVCpu->cpum.s.Guest.ss.Sel = ss;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
840{
841 pVCpu->cpum.s.Guest.cs.Sel = cs;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
847{
848 pVCpu->cpum.s.Guest.ds.Sel = ds;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
854{
855 pVCpu->cpum.s.Guest.es.Sel = es;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
861{
862 pVCpu->cpum.s.Guest.fs.Sel = fs;
863 return VINF_SUCCESS;
864}
865
866
867VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
868{
869 pVCpu->cpum.s.Guest.gs.Sel = gs;
870 return VINF_SUCCESS;
871}
872
873
874VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
875{
876 pVCpu->cpum.s.Guest.msrEFER = val;
877}
878
879
880VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
881{
882 if (pcbLimit)
883 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
884 return pVCpu->cpum.s.Guest.idtr.pIdt;
885}
886
887
888VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
889{
890 if (pHidden)
891 *pHidden = pVCpu->cpum.s.Guest.tr;
892 return pVCpu->cpum.s.Guest.tr.Sel;
893}
894
895
896VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
897{
898 return pVCpu->cpum.s.Guest.cs.Sel;
899}
900
901
902VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
903{
904 return pVCpu->cpum.s.Guest.ds.Sel;
905}
906
907
908VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
909{
910 return pVCpu->cpum.s.Guest.es.Sel;
911}
912
913
914VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
915{
916 return pVCpu->cpum.s.Guest.fs.Sel;
917}
918
919
920VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
921{
922 return pVCpu->cpum.s.Guest.gs.Sel;
923}
924
925
926VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
927{
928 return pVCpu->cpum.s.Guest.ss.Sel;
929}
930
931
932VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
933{
934 return pVCpu->cpum.s.Guest.ldtr.Sel;
935}
936
937
938VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
939{
940 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
941 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
942 return pVCpu->cpum.s.Guest.ldtr.Sel;
943}
944
945
946VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
947{
948 return pVCpu->cpum.s.Guest.cr0;
949}
950
951
952VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
953{
954 return pVCpu->cpum.s.Guest.cr2;
955}
956
957
958VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
959{
960 return pVCpu->cpum.s.Guest.cr3;
961}
962
963
964VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
965{
966 return pVCpu->cpum.s.Guest.cr4;
967}
968
969
970VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
971{
972 uint64_t u64;
973 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
974 if (RT_FAILURE(rc))
975 u64 = 0;
976 return u64;
977}
978
979
980VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
981{
982 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
983}
984
985
986VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
987{
988 return pVCpu->cpum.s.Guest.eip;
989}
990
991
992VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
993{
994 return pVCpu->cpum.s.Guest.rip;
995}
996
997
998VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
999{
1000 return pVCpu->cpum.s.Guest.eax;
1001}
1002
1003
1004VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1005{
1006 return pVCpu->cpum.s.Guest.ebx;
1007}
1008
1009
1010VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1011{
1012 return pVCpu->cpum.s.Guest.ecx;
1013}
1014
1015
1016VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1017{
1018 return pVCpu->cpum.s.Guest.edx;
1019}
1020
1021
1022VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1023{
1024 return pVCpu->cpum.s.Guest.esi;
1025}
1026
1027
1028VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1029{
1030 return pVCpu->cpum.s.Guest.edi;
1031}
1032
1033
1034VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1035{
1036 return pVCpu->cpum.s.Guest.esp;
1037}
1038
1039
1040VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1041{
1042 return pVCpu->cpum.s.Guest.ebp;
1043}
1044
1045
1046VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1047{
1048 return pVCpu->cpum.s.Guest.eflags.u32;
1049}
1050
1051
1052VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1053{
1054 switch (iReg)
1055 {
1056 case DISCREG_CR0:
1057 *pValue = pVCpu->cpum.s.Guest.cr0;
1058 break;
1059
1060 case DISCREG_CR2:
1061 *pValue = pVCpu->cpum.s.Guest.cr2;
1062 break;
1063
1064 case DISCREG_CR3:
1065 *pValue = pVCpu->cpum.s.Guest.cr3;
1066 break;
1067
1068 case DISCREG_CR4:
1069 *pValue = pVCpu->cpum.s.Guest.cr4;
1070 break;
1071
1072 case DISCREG_CR8:
1073 {
1074 uint8_t u8Tpr;
1075 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1076 if (RT_FAILURE(rc))
1077 {
1078 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1079 *pValue = 0;
1080 return rc;
1081 }
1082 *pValue = u8Tpr >> 4; /* Bits 7-4 of the TPR hold the task priority, which maps to CR8 bits 3-0. */
1083 break;
1084 }
1085
1086 default:
1087 return VERR_INVALID_PARAMETER;
1088 }
1089 return VINF_SUCCESS;
1090}
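Worked example (editorial) for the DISCREG_CR8 case above: the task priority lives in bits 7-4 of the APIC TPR, so shifting right by four yields the CR8 value; a TPR of 0xB0 reads back as CR8 = 0xB.

    static uint64_t exampleTprToCr8(uint8_t u8Tpr)
    {
        return (uint64_t)(u8Tpr >> 4);   /* 0xB0 -> 0xB, 0x20 -> 0x2 */
    }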
1091
1092
1093VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1094{
1095 return pVCpu->cpum.s.Guest.dr[0];
1096}
1097
1098
1099VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1100{
1101 return pVCpu->cpum.s.Guest.dr[1];
1102}
1103
1104
1105VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1106{
1107 return pVCpu->cpum.s.Guest.dr[2];
1108}
1109
1110
1111VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1112{
1113 return pVCpu->cpum.s.Guest.dr[3];
1114}
1115
1116
1117VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1118{
1119 return pVCpu->cpum.s.Guest.dr[6];
1120}
1121
1122
1123VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1124{
1125 return pVCpu->cpum.s.Guest.dr[7];
1126}
1127
1128
1129VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1130{
1131 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1132 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1133 if (iReg == 4 || iReg == 5)
1134 iReg += 2;
1135 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1136 return VINF_SUCCESS;
1137}
1138
1139
1140VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1141{
1142 return pVCpu->cpum.s.Guest.msrEFER;
1143}
1144
1145
1146/**
1147 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1148 *
1149 * @returns Pointer to the leaf if found, NULL if not.
1150 *
1151 * @param pVM The cross context VM structure.
1152 * @param uLeaf The leaf to get.
1153 */
1154PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1155{
1156 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1157 if (iEnd)
1158 {
1159 unsigned iStart = 0;
1160 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1161 for (;;)
1162 {
1163 unsigned i = iStart + (iEnd - iStart) / 2U;
1164 if (uLeaf < paLeaves[i].uLeaf)
1165 {
1166 if (i <= iStart)
1167 return NULL;
1168 iEnd = i;
1169 }
1170 else if (uLeaf > paLeaves[i].uLeaf)
1171 {
1172 i += 1;
1173 if (i >= iEnd)
1174 return NULL;
1175 iStart = i;
1176 }
1177 else
1178 {
1179 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1180 return &paLeaves[i];
1181
1182 /* This shouldn't normally happen. But in case it does due
1183 to user configuration overrides or something, just return the
1184 first sub-leaf. */
1185 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1186 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1187 while ( paLeaves[i].uSubLeaf != 0
1188 && i > 0
1189 && uLeaf == paLeaves[i - 1].uLeaf)
1190 i--;
1191 return &paLeaves[i];
1192 }
1193 }
1194 }
1195
1196 return NULL;
1197}
1198
1199
1200/**
1201 * Looks up a CPUID leaf in the CPUID leaf array.
1202 *
1203 * @returns Pointer to the leaf if found, NULL if not.
1204 *
1205 * @param pVM The cross context VM structure.
1206 * @param uLeaf The leaf to get.
1207 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1208 * isn't.
1209 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1210 */
1211PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1212{
1213 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1214 if (iEnd)
1215 {
1216 unsigned iStart = 0;
1217 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1218 for (;;)
1219 {
1220 unsigned i = iStart + (iEnd - iStart) / 2U;
1221 if (uLeaf < paLeaves[i].uLeaf)
1222 {
1223 if (i <= iStart)
1224 return NULL;
1225 iEnd = i;
1226 }
1227 else if (uLeaf > paLeaves[i].uLeaf)
1228 {
1229 i += 1;
1230 if (i >= iEnd)
1231 return NULL;
1232 iStart = i;
1233 }
1234 else
1235 {
1236 uSubLeaf &= paLeaves[i].fSubLeafMask;
1237 if (uSubLeaf == paLeaves[i].uSubLeaf)
1238 *pfExactSubLeafHit = true;
1239 else
1240 {
1241 /* Find the right subleaf. We return the last one before
1242 uSubLeaf if we don't find an exact match. */
1243 if (uSubLeaf < paLeaves[i].uSubLeaf)
1244 while ( i > 0
1245 && uLeaf == paLeaves[i - 1].uLeaf
1246 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1247 i--;
1248 else
1249 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1250 && uLeaf == paLeaves[i + 1].uLeaf
1251 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1252 i++;
1253 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1254 }
1255 return &paLeaves[i];
1256 }
1257 }
1258 }
1259
1260 *pfExactSubLeafHit = false;
1261 return NULL;
1262}
1263
1264
1265/**
1266 * Gets a CPUID leaf.
1267 *
1268 * @param pVCpu The cross context virtual CPU structure.
1269 * @param uLeaf The CPUID leaf to get.
1270 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1271 * @param pEax Where to store the EAX value.
1272 * @param pEbx Where to store the EBX value.
1273 * @param pEcx Where to store the ECX value.
1274 * @param pEdx Where to store the EDX value.
1275 */
1276VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1277 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1278{
1279 bool fExactSubLeafHit;
1280 PVM pVM = pVCpu->CTX_SUFF(pVM);
1281 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1282 if (pLeaf)
1283 {
1284 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1285 if (fExactSubLeafHit)
1286 {
1287 *pEax = pLeaf->uEax;
1288 *pEbx = pLeaf->uEbx;
1289 *pEcx = pLeaf->uEcx;
1290 *pEdx = pLeaf->uEdx;
1291
1292 /*
1293 * Deal with CPU specific information (currently only APIC ID).
1294 */
1295 if (pLeaf->fFlags & (CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE))
1296 {
1297 if (uLeaf == 1)
1298 {
1299 /* EBX: Bits 31-24: Initial APIC ID. */
1300 Assert(pVCpu->idCpu <= 255);
1301 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1302 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1303
1304 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1305 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1306 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1307 }
1308 else if (uLeaf == 0xb)
1309 {
1310 /* EDX: Initial extended APIC ID. */
1311 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1312 *pEdx = pVCpu->idCpu;
1313 }
1314 else if (uLeaf == UINT32_C(0x8000001e))
1315 {
1316 /* EAX: Initial extended APIC ID. */
1317 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1318 *pEax = pVCpu->idCpu;
1319 }
1320 else
1321 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1322 }
1323 }
1324 /*
1325 * Out-of-range sub-leaves aren't quite as easy or pretty to emulate,
1326 * but we do the best we can here...
1327 */
1328 else
1329 {
1330 *pEax = *pEbx = *pEcx = *pEdx = 0;
1331 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1332 {
1333 *pEcx = uSubLeaf & 0xff;
1334 *pEdx = pVCpu->idCpu;
1335 }
1336 }
1337 }
1338 else
1339 {
1340 /*
1341 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1342 */
1343 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1344 {
1345 default:
1346 AssertFailed();
1347 case CPUMUNKNOWNCPUID_DEFAULTS:
1348 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1349 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1350 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1351 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1352 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1353 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1354 break;
1355 case CPUMUNKNOWNCPUID_PASSTHRU:
1356 *pEax = uLeaf;
1357 *pEbx = 0;
1358 *pEcx = uSubLeaf;
1359 *pEdx = 0;
1360 break;
1361 }
1362 }
1363 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1364}
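Illustrative caller sketch (editorial, hypothetical helper name) using the CPUMGetGuestCpuId signature above to query standard leaf 1 and test the APIC feature bit:

    static bool exampleGuestHasApic(PVMCPU pVCpu)
    {
        uint32_t uEax, uEbx, uEcx, uEdx;
        CPUMGetGuestCpuId(pVCpu, 1 /*uLeaf*/, 0 /*uSubLeaf*/, &uEax, &uEbx, &uEcx, &uEdx);
        return (uEdx & X86_CPUID_FEATURE_EDX_APIC) != 0;
    }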
1365
1366
1367/**
1368 * Sets a CPUID feature bit.
1369 *
1370 * @param pVM The cross context VM structure.
1371 * @param enmFeature The feature to set.
1372 */
1373VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1374{
1375 PCPUMCPUIDLEAF pLeaf;
1376
1377 switch (enmFeature)
1378 {
1379 /*
1380 * Set the APIC bit in both feature masks.
1381 */
1382 case CPUMCPUIDFEATURE_APIC:
1383 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1384 if (pLeaf)
1385 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
1386
1387 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1388 if ( pLeaf
1389 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1390 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1391
1392 pVM->cpum.s.GuestFeatures.fApic = 1;
1393 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n"));
1394 break;
1395
1396 /*
1397 * Set the x2APIC bit in the standard feature mask.
1398 */
1399 case CPUMCPUIDFEATURE_X2APIC:
1400 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1401 if (pLeaf)
1402 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
1403 pVM->cpum.s.GuestFeatures.fX2Apic = 1;
1404 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
1405 break;
1406
1407 /*
1408 * Set the sysenter/sysexit bit in the standard feature mask.
1409 * Assumes the caller knows what it's doing! (host must support these)
1410 */
1411 case CPUMCPUIDFEATURE_SEP:
1412 if (!pVM->cpum.s.HostFeatures.fSysEnter)
1413 {
1414 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1415 return;
1416 }
1417
1418 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1419 if (pLeaf)
1420 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
1421 pVM->cpum.s.GuestFeatures.fSysEnter = 1;
1422 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
1423 break;
1424
1425 /*
1426 * Set the syscall/sysret bit in the extended feature mask.
1427 * Assumes the caller knows what it's doing! (host must support these)
1428 */
1429 case CPUMCPUIDFEATURE_SYSCALL:
1430 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1431 if ( !pLeaf
1432 || !pVM->cpum.s.HostFeatures.fSysCall)
1433 {
1434#if HC_ARCH_BITS == 32
1435 /* Intel CPUs do not seem to set X86_CPUID_EXT_FEATURE_EDX_SYSCALL in
1436 32-bit mode, even when the CPU is capable of doing so in 64-bit
1437 mode. Long mode requires syscall support. */
1438 if (!pVM->cpum.s.HostFeatures.fLongMode)
1439#endif
1440 {
1441 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
1442 return;
1443 }
1444 }
1445
1446 /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
1447 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
1448 pVM->cpum.s.GuestFeatures.fSysCall = 1;
1449 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
1450 break;
1451
1452 /*
1453 * Set the PAE bit in both feature masks.
1454 * Assumes the caller knows what it's doing! (host must support these)
1455 */
1456 case CPUMCPUIDFEATURE_PAE:
1457 if (!pVM->cpum.s.HostFeatures.fPae)
1458 {
1459 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
1460 return;
1461 }
1462
1463 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1464 if (pLeaf)
1465 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
1466
1467 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1468 if ( pLeaf
1469 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1470 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1471
1472 pVM->cpum.s.GuestFeatures.fPae = 1;
1473 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
1474 break;
1475
1476 /*
1477 * Set the LONG MODE bit in the extended feature mask.
1478 * Assumes the caller knows what it's doing! (host must support these)
1479 */
1480 case CPUMCPUIDFEATURE_LONG_MODE:
1481 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1482 if ( !pLeaf
1483 || !pVM->cpum.s.HostFeatures.fLongMode)
1484 {
1485 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
1486 return;
1487 }
1488
1489 /* Valid for both Intel and AMD. */
1490 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1491 pVM->cpum.s.GuestFeatures.fLongMode = 1;
1492 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
1493 break;
1494
1495 /*
1496 * Set the NX/XD bit in the extended feature mask.
1497 * Assumes the caller knows what it's doing! (host must support these)
1498 */
1499 case CPUMCPUIDFEATURE_NX:
1500 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1501 if ( !pLeaf
1502 || !pVM->cpum.s.HostFeatures.fNoExecute)
1503 {
1504 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
1505 return;
1506 }
1507
1508 /* Valid for both Intel and AMD. */
1509 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
1510 pVM->cpum.s.GuestFeatures.fNoExecute = 1;
1511 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
1512 break;
1513
1514
1515 /*
1516 * Set the LAHF/SAHF support in 64-bit mode.
1517 * Assumes the caller knows what it's doing! (host must support this)
1518 */
1519 case CPUMCPUIDFEATURE_LAHF:
1520 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1521 if ( !pLeaf
1522 || !pVM->cpum.s.HostFeatures.fLahfSahf)
1523 {
1524 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
1525 return;
1526 }
1527
1528 /* Valid for both Intel and AMD. */
1529 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1530 pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
1531 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1532 break;
1533
1534 /*
1535 * Set the page attribute table bit. This is an alternative page-level
1536 * cache control that doesn't matter much when everything is
1537 * virtualized, though it may matter when passing through device memory.
1538 */
1539 case CPUMCPUIDFEATURE_PAT:
1540 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1541 if (pLeaf)
1542 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
1543
1544 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1545 if ( pLeaf
1546 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1547 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1548
1549 pVM->cpum.s.GuestFeatures.fPat = 1;
1550 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
1551 break;
1552
1553 /*
1554 * Set the RDTSCP support bit.
1555 * Assumes the caller knows what it's doing! (host must support this)
1556 */
1557 case CPUMCPUIDFEATURE_RDTSCP:
1558 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1559 if ( !pLeaf
1560 || !pVM->cpum.s.HostFeatures.fRdTscP
1561 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1562 {
1563 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1564 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
1565 return;
1566 }
1567
1568 /* Valid for both Intel and AMD. */
1569 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1570 pVM->cpum.s.GuestFeatures.fRdTscP = 1;
1571 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1572 break;
1573
1574 /*
1575 * Set the Hypervisor Present bit in the standard feature mask.
1576 */
1577 case CPUMCPUIDFEATURE_HVP:
1578 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1579 if (pLeaf)
1580 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
1581 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
1582 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
1583 break;
1584
1585 /*
1586 * Set the MWAIT Extensions Present bit in the MWAIT/MONITOR leaf.
1587 * This currently includes the Present bit and MWAITBREAK bit as well.
1588 */
1589 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1590 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005));
1591 if ( !pLeaf
1592 || !pVM->cpum.s.HostFeatures.fMWaitExtensions)
1593 {
1594 LogRel(("CPUM: WARNING! Can't turn on MWAIT Extensions when the host doesn't support it!\n"));
1595 return;
1596 }
1597
1598 /* Valid for both Intel and AMD. */
1599 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
1600 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 1;
1601 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled MWAIT Extensions.\n"));
1602 break;
1603
1604 /*
1605 * OSXSAVE - only used from CPUMSetGuestCR4.
1606 */
1607 case CPUMCPUIDFEATURE_OSXSAVE:
1608 AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
1609
1610 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1611 AssertLogRelReturnVoid(pLeaf);
1612
1613 /* UNI: Special case for single CPU to make life simple for CPUMPatchHlpCpuId. */
1614 if (pVM->cCpus == 1)
1615 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_OSXSAVE;
1616 /* SMP: Set flag indicating OSXSAVE updating (superfluous because of the APIC ID, but that's fine). */
1617 else
1618 ASMAtomicOrU32(&pLeaf->fFlags, CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE);
1619 break;
1620
1621 default:
1622 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1623 break;
1624 }
1625
1626 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1627 {
1628 PVMCPU pVCpu = &pVM->aCpus[i];
1629 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1630 }
1631}
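How a caller might use this (editorial sketch; the init-time placement is an assumption, not taken from this file): enable the desired features and rely on the CPUM_CHANGED_CPUID flag set for every VCPU at the end of the function.

    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);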
1632
1633
1634/**
1635 * Queries a CPUID feature bit.
1636 *
1637 * @returns boolean for feature presence
1638 * @param pVM The cross context VM structure.
1639 * @param enmFeature The feature to query.
1640 */
1641VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1642{
1643 switch (enmFeature)
1644 {
1645 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
1646 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
1647 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
1648 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
1649 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
1650 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
1651 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
1652 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
1653 case CPUMCPUIDFEATURE_PAT: return pVM->cpum.s.GuestFeatures.fPat;
1654 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
1655 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
1656 case CPUMCPUIDFEATURE_MWAIT_EXTS: return pVM->cpum.s.GuestFeatures.fMWaitExtensions;
1657
1658 case CPUMCPUIDFEATURE_OSXSAVE:
1659 case CPUMCPUIDFEATURE_INVALID:
1660 case CPUMCPUIDFEATURE_32BIT_HACK:
1661 break;
1662 }
1663 AssertFailed();
1664 return false;
1665}
1666
1667
1668/**
1669 * Clears a CPUID feature bit.
1670 *
1671 * @param pVM The cross context VM structure.
1672 * @param enmFeature The feature to clear.
1673 */
1674VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1675{
1676 PCPUMCPUIDLEAF pLeaf;
1677 switch (enmFeature)
1678 {
1679 case CPUMCPUIDFEATURE_APIC:
1680 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1681 if (pLeaf)
1682 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1683
1684 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1685 if ( pLeaf
1686 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1687 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1688
1689 pVM->cpum.s.GuestFeatures.fApic = 0;
1690 Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
1691 break;
1692
1693 case CPUMCPUIDFEATURE_X2APIC:
1694 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1695 if (pLeaf)
1696 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1697 pVM->cpum.s.GuestFeatures.fX2Apic = 0;
1698 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
1699 break;
1700
1701 case CPUMCPUIDFEATURE_PAE:
1702 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1703 if (pLeaf)
1704 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
1705
1706 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1707 if ( pLeaf
1708 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1709 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1710
1711 pVM->cpum.s.GuestFeatures.fPae = 0;
1712 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
1713 break;
1714
1715 case CPUMCPUIDFEATURE_PAT:
1716 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1717 if (pLeaf)
1718 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
1719
1720 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1721 if ( pLeaf
1722 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1723 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1724
1725 pVM->cpum.s.GuestFeatures.fPat = 0;
1726 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
1727 break;
1728
1729 case CPUMCPUIDFEATURE_LONG_MODE:
1730 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1731 if (pLeaf)
1732 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1733 pVM->cpum.s.GuestFeatures.fLongMode = 0;
1734 break;
1735
1736 case CPUMCPUIDFEATURE_LAHF:
1737 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1738 if (pLeaf)
1739 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1740 pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
1741 break;
1742
1743 case CPUMCPUIDFEATURE_RDTSCP:
1744 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1745 if (pLeaf)
1746 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1747 pVM->cpum.s.GuestFeatures.fRdTscP = 0;
1748 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
1749 break;
1750
1751 case CPUMCPUIDFEATURE_HVP:
1752 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1753 if (pLeaf)
1754 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
1755 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
1756 break;
1757
1758 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1759 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005));
1760 if (pLeaf)
1761 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
1762 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 0;
1763 Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
1764 break;
1765
1766 /*
1767 * OSXSAVE - only used from CPUMSetGuestCR4.
1768 */
1769 case CPUMCPUIDFEATURE_OSXSAVE:
1770 AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
1771
1772 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1773 AssertLogRelReturnVoid(pLeaf);
1774
1775 /* UNI: Special case for single CPU to make life easy for CPUMPatchHlpCpuId. */
1776 if (pVM->cCpus == 1)
1777 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_OSXSAVE;
1778 /* else: SMP: We never set the OSXSAVE bit and leaving the CONTAINS_OSXSAVE flag is fine. */
1779 break;
1780
1781
1782 default:
1783 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1784 break;
1785 }
1786
1787 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1788 {
1789 PVMCPU pVCpu = &pVM->aCpus[i];
1790 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1791 }
1792}
1793
1794
1795/**
1796 * Gets the host CPU vendor.
1797 *
1798 * @returns CPU vendor.
1799 * @param pVM The cross context VM structure.
1800 */
1801VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1802{
1803 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1804}
1805
1806
1807/**
1808 * Gets the CPU vendor.
1809 *
1810 * @returns CPU vendor.
1811 * @param pVM The cross context VM structure.
1812 */
1813VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1814{
1815 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1816}
1817
1818
1819VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1820{
1821 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1822 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1823}
1824
1825
1826VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1827{
1828 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1829 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1830}
1831
1832
1833VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1834{
1835 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1836 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1837}
1838
1839
1840VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1841{
1842 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1843 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1844}
1845
1846
1847VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1848{
1849 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1850 return VINF_SUCCESS; /* No need to recalc. */
1851}
1852
1853
1854VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1855{
1856 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1857 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1858}
1859
1860
1861VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1862{
1863 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1864 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1865 if (iReg == 4 || iReg == 5)
1866 iReg += 2;
1867 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1868 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1869}
1870
1871
1872/**
1873 * Recalculates the hypervisor DRx register values based on current guest
1874 * registers and DBGF breakpoints, updating changed registers depending on the
1875 * context.
1876 *
1877 * This is called whenever a guest DRx register is modified (any context) and
1878 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1879 *
1880 * In raw-mode context this function will reload any (hyper) DRx registers that
1881 * come out with a different value. It may also have to save the host debug
1882 * registers if that hasn't been done already. In this context though, we'll
1883 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1884 * are only important when breakpoints are actually enabled.
1885 *
1886 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1887 * reloaded by the HM code if it changes. Furthermore, we will only use the
1888 * combined register set when the VBox debugger is actually using hardware BPs,
1889 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1890 * concern us here).
1891 *
1892 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1893 * all the time.
1894 *
1895 * @returns VINF_SUCCESS.
1896 * @param pVCpu The cross context virtual CPU structure.
1897 * @param iGstReg The guest debug register number that was modified.
1898 * UINT8_MAX if not guest register.
1899 * @param fForceHyper Used in HM to force hyper registers because of single
1900 * stepping.
1901 */
1902VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1903{
1904 PVM pVM = pVCpu->CTX_SUFF(pVM);
1905
1906 /*
1907 * Compare the DR7s first.
1908 *
1909 * We only care about the enabled flags. GD is virtualized when we
1910 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1911 * always have the LE and GE bits set, so no need to check and disable
1912 * stuff if they're cleared like we have to for the guest DR7.
1913 */
1914 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1915 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1916 uGstDr7 = 0;
1917 else if (!(uGstDr7 & X86_DR7_LE))
1918 uGstDr7 &= ~X86_DR7_LE_ALL;
1919 else if (!(uGstDr7 & X86_DR7_GE))
1920 uGstDr7 &= ~X86_DR7_GE_ALL;
1921
1922 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1923
1924#ifdef IN_RING0
1925 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1926 fForceHyper = true;
1927#endif
1928 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
1929 {
1930 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1931#ifdef IN_RC
1932 bool const fHmEnabled = false;
1933#elif defined(IN_RING3)
1934 bool const fHmEnabled = HMIsEnabled(pVM);
1935#endif
1936
1937 /*
1938 * Ok, something is enabled. Recalc each of the breakpoints, taking
1939 * the VM debugger ones over the guest ones. In raw-mode context we will
1940 * not allow breakpoints with values inside the hypervisor area.
1941 */
1942 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1943
1944 /* bp 0 */
1945 RTGCUINTREG uNewDr0;
1946 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1947 {
1948 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1949 uNewDr0 = DBGFBpGetDR0(pVM);
1950 }
1951 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1952 {
1953 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1954#ifndef IN_RING0
1955 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1956 uNewDr0 = 0;
1957 else
1958#endif
1959 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1960 }
1961 else
1962 uNewDr0 = 0;
1963
1964 /* bp 1 */
1965 RTGCUINTREG uNewDr1;
1966 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1967 {
1968 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1969 uNewDr1 = DBGFBpGetDR1(pVM);
1970 }
1971 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1972 {
1973 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1974#ifndef IN_RING0
1975 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1976 uNewDr1 = 0;
1977 else
1978#endif
1979 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1980 }
1981 else
1982 uNewDr1 = 0;
1983
1984 /* bp 2 */
1985 RTGCUINTREG uNewDr2;
1986 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1987 {
1988 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1989 uNewDr2 = DBGFBpGetDR2(pVM);
1990 }
1991 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1992 {
1993 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1994#ifndef IN_RING0
1995 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1996 uNewDr2 = 0;
1997 else
1998#endif
1999 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2000 }
2001 else
2002 uNewDr2 = 0;
2003
2004 /* bp 3 */
2005 RTGCUINTREG uNewDr3;
2006 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2007 {
2008 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2009 uNewDr3 = DBGFBpGetDR3(pVM);
2010 }
2011 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2012 {
2013 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2014#ifndef IN_RING0
2015 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
2016 uNewDr3 = 0;
2017 else
2018#endif
2019 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2020 }
2021 else
2022 uNewDr3 = 0;
2023
2024 /*
2025 * Apply the updates.
2026 */
2027#ifdef IN_RC
2028 /* Make sure to save host registers first. */
2029 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
2030 {
2031 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
2032 {
2033 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
2034 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
2035 }
2036 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
2037 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
2038 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
2039 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
2040 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
2041
2042 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
2043 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
2044 ASMSetDR0(uNewDr0);
2045 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
2046 ASMSetDR1(uNewDr1);
2047 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
2048 ASMSetDR2(uNewDr2);
2049 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
2050 ASMSetDR3(uNewDr3);
2051 ASMSetDR6(X86_DR6_INIT_VAL);
2052 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
2053 ASMSetDR7(uNewDr7);
2054 }
2055 else
2056#endif
2057 {
2058 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
2059 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2060 CPUMSetHyperDR3(pVCpu, uNewDr3);
2061 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2062 CPUMSetHyperDR2(pVCpu, uNewDr2);
2063 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2064 CPUMSetHyperDR1(pVCpu, uNewDr1);
2065 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2066 CPUMSetHyperDR0(pVCpu, uNewDr0);
2067 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2068 CPUMSetHyperDR7(pVCpu, uNewDr7);
2069 }
2070 }
2071#ifdef IN_RING0
2072 else if (CPUMIsGuestDebugStateActive(pVCpu))
2073 {
2074 /*
2075 * Reload the register that was modified. Normally this won't happen,
2076 * as we won't intercept DRx writes when the hyper debug state isn't
2077 * loaded, but should it happen for some reason we'll simply deal
2078 * with it.
2079 */
2080 switch (iGstReg)
2081 {
2082 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
2083 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
2084 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
2085 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
2086 default:
2087 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
2088 }
2089 }
2090#endif
2091 else
2092 {
2093 /*
2094 * No active debug state any more. In raw-mode this means we have to
2095 * make sure DR7 has everything disabled now, if we armed it already.
2096 * In ring-0 we might end up here when just single stepping.
2097 */
2098#if defined(IN_RC) || defined(IN_RING0)
2099 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
2100 {
2101# ifdef IN_RC
2102 ASMSetDR7(X86_DR7_INIT_VAL);
2103# endif
2104 if (pVCpu->cpum.s.Hyper.dr[0])
2105 ASMSetDR0(0);
2106 if (pVCpu->cpum.s.Hyper.dr[1])
2107 ASMSetDR1(0);
2108 if (pVCpu->cpum.s.Hyper.dr[2])
2109 ASMSetDR2(0);
2110 if (pVCpu->cpum.s.Hyper.dr[3])
2111 ASMSetDR3(0);
2112 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
2113 }
2114#endif
2115 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2116
2117 /* Clear all the registers. */
2118 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
2119 pVCpu->cpum.s.Hyper.dr[3] = 0;
2120 pVCpu->cpum.s.Hyper.dr[2] = 0;
2121 pVCpu->cpum.s.Hyper.dr[1] = 0;
2122 pVCpu->cpum.s.Hyper.dr[0] = 0;
2123
2124 }
2125 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2126 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2127 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2128 pVCpu->cpum.s.Hyper.dr[7]));
2129
2130 return VINF_SUCCESS;
2131}
2132
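/* Editor's illustrative sketch (not part of the original source, compiled out):
   a guest MOV DR7 write path is expected to store the new value and then ask
   CPUM to recompute the hypervisor DRx set for the current context.  The
   function name cpumExampleOnGuestDr7Write is made up for illustration. */
#if 0
static int cpumExampleOnGuestDr7Write(PVMCPU pVCpu, uint64_t uNewDr7)
{
    pVCpu->cpum.s.Guest.dr[7] = uNewDr7;                    /* simplified guest DR7 update */
    return CPUMRecalcHyperDRx(pVCpu, 7 /*iGstReg*/, false /*fForceHyper*/);
}
#endif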
2133
2134/**
2135 * Set the guest XCR0 register.
2136 *
2137 * Will load additional state if the FPU state is already loaded (in ring-0 &
2138 * raw-mode context).
2139 *
2140 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
2141 * value.
2142 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2143 * @param uNewValue The new value.
2144 * @thread EMT(pVCpu)
2145 */
2146VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
2147{
2148 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
2149 /* The X87 bit cannot be cleared. */
2150 && (uNewValue & XSAVE_C_X87)
2151 /* AVX requires SSE. */
2152 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
2153 /* AVX-512 requires YMM, SSE and all three of its components to be enabled. */
2154 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
2155 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
2156 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
2157 )
2158 {
2159 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
2160
2161 /* If more state components are enabled, we need to take care to load
2162 them if the FPU/SSE state is already loaded. Otherwise we may leak
2163 host state to the guest. */
2164 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
2165 if (fNewComponents)
2166 {
2167#if defined(IN_RING0) || defined(IN_RC)
2168 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU)
2169 {
2170 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
2171 /* Adding more components. */
2172 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
2173 else
2174 {
2175 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
2176 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
2177 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
2178 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
2179 }
2180 }
2181#endif
2182 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
2183 }
2184 return VINF_SUCCESS;
2185 }
2186 return VERR_CPUM_RAISE_GP_0;
2187}
2188
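/* Editor's note - worked examples for the CPUMSetGuestXcr0 checks above,
   assuming fXStateGuestMask permits all of the components mentioned:
       XSAVE_C_X87                                              -> VINF_SUCCESS
       XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM                  -> VINF_SUCCESS
       XSAVE_C_X87 | XSAVE_C_YMM                                -> VERR_CPUM_RAISE_GP_0 (AVX without SSE)
       0                                                        -> VERR_CPUM_RAISE_GP_0 (X87 cannot be cleared)
       X87 | SSE | YMM | OPMASK | ZMM_HI256 | ZMM_16HI          -> VINF_SUCCESS (full AVX-512 set)
       X87 | SSE | OPMASK                                       -> VERR_CPUM_RAISE_GP_0 (partial AVX-512 set) */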
2189
2190/**
2191 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2192 *
2193 * @returns true if NX is enabled, otherwise false.
2194 * @param pVCpu The cross context virtual CPU structure.
2195 */
2196VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2197{
2198 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2199}
2200
2201
2202/**
2203 * Tests if the guest has the Page Size Extension enabled (PSE).
2204 *
2205 * @returns true if PSE is enabled (or implied by PAE/long mode), otherwise false.
2206 * @param pVCpu The cross context virtual CPU structure.
2207 */
2208VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2209{
2210 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2211 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2212}
2213
2214
2215/**
2216 * Tests if the guest has paging enabled (PG).
2217 *
2218 * @returns true if paging is enabled, otherwise false.
2219 * @param pVCpu The cross context virtual CPU structure.
2220 */
2221VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2222{
2223 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2224}
2225
2226
2227/**
2228 * Tests if the guest has ring-0 write protection enabled (WP).
2229 *
2230 * @returns true if write protection is enabled, otherwise false.
2231 * @param pVCpu The cross context virtual CPU structure.
2232 */
2233VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2234{
2235 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2236}
2237
2238
2239/**
2240 * Tests if the guest is running in real mode or not.
2241 *
2242 * @returns true if in real mode, otherwise false.
2243 * @param pVCpu The cross context virtual CPU structure.
2244 */
2245VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2246{
2247 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2248}
2249
2250
2251/**
2252 * Tests if the guest is running in real or virtual 8086 mode.
2253 *
2254 * @returns @c true if it is, @c false if not.
2255 * @param pVCpu The cross context virtual CPU structure.
2256 */
2257VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2258{
2259 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2260 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2261}
2262
2263
2264/**
2265 * Tests if the guest is running in protected mode or not.
2266 *
2267 * @returns true if in protected mode, otherwise false.
2268 * @param pVCpu The cross context virtual CPU structure.
2269 */
2270VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2271{
2272 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2273}
2274
2275
2276/**
2277 * Tests if the guest is running in paged protected mode or not.
2278 *
2279 * @returns true if in paged protected mode, otherwise false.
2280 * @param pVCpu The cross context virtual CPU structure.
2281 */
2282VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2283{
2284 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2285}
2286
2287
2288/**
2289 * Tests if the guest is running in long mode or not.
2290 *
2291 * @returns true if in long mode, otherwise false.
2292 * @param pVCpu The cross context virtual CPU structure.
2293 */
2294VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2295{
2296 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2297}
2298
2299
2300/**
2301 * Tests if the guest is running in PAE mode or not.
2302 *
2303 * @returns true if in PAE mode, otherwise false.
2304 * @param pVCpu The cross context virtual CPU structure.
2305 */
2306VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2307{
2308 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
2309 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
2310 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2311 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2312 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2313}
2314
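/* Editor's summary of the mode predicates above (abbreviating CR0.PE, CR0.PG,
   CR4.PAE and EFER.LMA):
       real mode:             PE=0
       protected mode:        PE=1
       paged protected mode:  PE=1, PG=1
       PAE mode:              PG=1, PAE=1, LMA=0
       long mode:             LMA=1 (LME set and paging enabled) */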
2315
2316/**
2317 * Tests if the guest is running in 64-bit mode or not.
2318 *
2319 * @returns true if in 64-bit protected mode, otherwise false.
2320 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2321 */
2322VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2323{
2324 if (!CPUMIsGuestInLongMode(pVCpu))
2325 return false;
2326 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2327 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2328}
2329
2330
2331/**
2332 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2333 * registers.
2334 *
2335 * @returns true if in 64-bit protected mode, otherwise false.
2336 * @param pCtx Pointer to the current guest CPU context.
2337 */
2338VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2339{
2340 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2341}
2342
2343#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2344
2345/**
2346 * Tests whether we have entered raw-mode and adjusted the selector RPLs.
2347 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2348 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2349 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2350 */
2351VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2352{
2353 return pVCpu->cpum.s.fRawEntered;
2354}
2355
2356/**
2357 * Transforms the guest CPU state to raw-ring mode.
2358 *
2359 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
2360 *
2361 * @returns VBox status code. (recompiler failure)
2362 * @param pVCpu The cross context virtual CPU structure.
2363 * @see @ref pg_raw
2364 */
2365VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2366{
2367 PVM pVM = pVCpu->CTX_SUFF(pVM);
2368
2369 Assert(!pVCpu->cpum.s.fRawEntered);
2370 Assert(!pVCpu->cpum.s.fRemEntered);
2371 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2372
2373 /*
2374 * Are we in Ring-0?
2375 */
2376 if ( pCtx->ss.Sel
2377 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2378 && !pCtx->eflags.Bits.u1VM)
2379 {
2380 /*
2381 * Enter execution mode.
2382 */
2383 PATMRawEnter(pVM, pCtx);
2384
2385 /*
2386 * Set CPL to Ring-1.
2387 */
2388 pCtx->ss.Sel |= 1;
2389 if ( pCtx->cs.Sel
2390 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2391 pCtx->cs.Sel |= 1;
2392 }
2393 else
2394 {
2395# ifdef VBOX_WITH_RAW_RING1
2396 if ( EMIsRawRing1Enabled(pVM)
2397 && !pCtx->eflags.Bits.u1VM
2398 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2399 {
2400 /* Set CPL to Ring-2. */
2401 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2402 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2403 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2404 }
2405# else
2406 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2407 ("ring-1 code not supported\n"));
2408# endif
2409 /*
2410 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2411 */
2412 PATMRawEnter(pVM, pCtx);
2413 }
2414
2415 /*
2416 * Assert sanity.
2417 */
2418 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2419 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2420 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2421 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
2422
2423 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2424
2425 pVCpu->cpum.s.fRawEntered = true;
2426 return VINF_SUCCESS;
2427}
2428
2429
2430/**
2431 * Transforms the guest CPU state from raw-ring mode to correct values.
2432 *
2433 * This function will change any selector registers with DPL=1 to DPL=0.
2434 *
2435 * @returns Adjusted rc.
2436 * @param pVCpu The cross context virtual CPU structure.
2437 * @param rc Raw mode return code
2438 * @see @ref pg_raw
2439 */
2440VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2441{
2442 PVM pVM = pVCpu->CTX_SUFF(pVM);
2443
2444 /*
2445 * Don't leave if we've already left (in RC).
2446 */
2447 Assert(!pVCpu->cpum.s.fRemEntered);
2448 if (!pVCpu->cpum.s.fRawEntered)
2449 return rc;
2450 pVCpu->cpum.s.fRawEntered = false;
2451
2452 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2453 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2454 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2455 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2456
2457 /*
2458 * Are we executing in raw ring-1?
2459 */
2460 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2461 && !pCtx->eflags.Bits.u1VM)
2462 {
2463 /*
2464 * Leave execution mode.
2465 */
2466 PATMRawLeave(pVM, pCtx, rc);
2467 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2468 /** @todo See what happens if we remove this. */
2469 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2470 pCtx->ds.Sel &= ~X86_SEL_RPL;
2471 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2472 pCtx->es.Sel &= ~X86_SEL_RPL;
2473 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2474 pCtx->fs.Sel &= ~X86_SEL_RPL;
2475 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2476 pCtx->gs.Sel &= ~X86_SEL_RPL;
2477
2478 /*
2479 * Ring-1 selector => Ring-0.
2480 */
2481 pCtx->ss.Sel &= ~X86_SEL_RPL;
2482 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2483 pCtx->cs.Sel &= ~X86_SEL_RPL;
2484 }
2485 else
2486 {
2487 /*
2488 * PATM is taking care of the IOPL and IF flags for us.
2489 */
2490 PATMRawLeave(pVM, pCtx, rc);
2491 if (!pCtx->eflags.Bits.u1VM)
2492 {
2493# ifdef VBOX_WITH_RAW_RING1
2494 if ( EMIsRawRing1Enabled(pVM)
2495 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2496 {
2497 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2498 /** @todo See what happens if we remove this. */
2499 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2500 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2501 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2502 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2503 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2504 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2505 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2506 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2507
2508 /*
2509 * Ring-2 selector => Ring-1.
2510 */
2511 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2512 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2513 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2514 }
2515 else
2516 {
2517# endif
2518 /** @todo See what happens if we remove this. */
2519 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2520 pCtx->ds.Sel &= ~X86_SEL_RPL;
2521 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2522 pCtx->es.Sel &= ~X86_SEL_RPL;
2523 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2524 pCtx->fs.Sel &= ~X86_SEL_RPL;
2525 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2526 pCtx->gs.Sel &= ~X86_SEL_RPL;
2527# ifdef VBOX_WITH_RAW_RING1
2528 }
2529# endif
2530 }
2531 }
2532
2533 return rc;
2534}
2535
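/* Editor's illustrative sketch (not part of the original source, compiled out):
   raw-mode execution is expected to be bracketed by CPUMRawEnter/CPUMRawLeave
   so the selector RPL adjustments are applied and undone around each run.  The
   names cpumExampleRunRaw and emExampleExecuteRawChunk are made up. */
# if 0
static int cpumExampleRunRaw(PVMCPU pVCpu)
{
    int rc = CPUMRawEnter(pVCpu);               /* push ring-0 guest code to ring-1 */
    if (RT_SUCCESS(rc))
    {
        rc = emExampleExecuteRawChunk(pVCpu);   /* hypothetical execution step */
        rc = CPUMRawLeave(pVCpu, rc);           /* undo the RPL adjustments, adjust rc */
    }
    return rc;
}
# endif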
2536#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2537
2538/**
2539 * Updates the EFLAGS while we're in raw-mode.
2540 *
2541 * @param pVCpu The cross context virtual CPU structure.
2542 * @param fEfl The new EFLAGS value.
2543 */
2544VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2545{
2546#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2547 if (pVCpu->cpum.s.fRawEntered)
2548 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2549 else
2550#endif
2551 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2552}
2553
2554
2555/**
2556 * Gets the EFLAGS while we're in raw-mode.
2557 *
2558 * @returns The eflags.
2559 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2560 */
2561VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2562{
2563#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2564 if (pVCpu->cpum.s.fRawEntered)
2565 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2566#endif
2567 return pVCpu->cpum.s.Guest.eflags.u32;
2568}
2569
2570
2571/**
2572 * Sets the specified changed flags (CPUM_CHANGED_*).
2573 *
2574 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2575 * @param fChangedAdd The changed flags to add.
2576 */
2577VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2578{
2579 pVCpu->cpum.s.fChanged |= fChangedAdd;
2580}
2581
2582
2583/**
2584 * Checks if the CPU supports the XSAVE and XRSTOR instructions.
2585 *
2586 * @returns true if supported.
2587 * @returns false if not supported.
2588 * @param pVM The cross context VM structure.
2589 */
2590VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2591{
2592 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2593}
2594
2595
2596/**
2597 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2598 * @returns true if used.
2599 * @returns false if not used.
2600 * @param pVM The cross context VM structure.
2601 */
2602VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2603{
2604 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2605}
2606
2607
2608/**
2609 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2610 * @returns true if used.
2611 * @returns false if not used.
2612 * @param pVM The cross context VM structure.
2613 */
2614VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2615{
2616 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2617}
2618
2619#ifdef IN_RC
2620
2621/**
2622 * Lazily sync in the FPU/XMM state.
2623 *
2624 * @returns VBox status code.
2625 * @param pVCpu The cross context virtual CPU structure.
2626 */
2627VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2628{
2629 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2630}
2631
2632#endif /* IN_RC */
2633
2634/**
2635 * Checks if we activated the FPU/XMM state of the guest OS.
2636 * @returns true if we did.
2637 * @returns false if not.
2638 * @param pVCpu The cross context virtual CPU structure.
2639 */
2640VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2641{
2642 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
2643}
2644
2645
2646/**
2647 * Checks if the guest debug state is active.
2648 *
2649 * @returns boolean
2650 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2651 */
2652VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2653{
2654 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2655}
2656
2657
2658/**
2659 * Checks if the guest debug state is to be made active during the world-switch
2660 * (currently only used for the 32->64 switcher case).
2661 *
2662 * @returns boolean
2663 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2664 */
2665VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2666{
2667 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2668}
2669
2670
2671/**
2672 * Checks if the hyper debug state is active.
2673 *
2674 * @returns boolean
2675 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2676 */
2677VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2678{
2679 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2680}
2681
2682
2683/**
2684 * Checks if the hyper debug state is to be made active during the world-switch
2685 * (currently only used for the 32->64 switcher case).
2686 *
2687 * @returns boolean
2688 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2689 */
2690VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2691{
2692 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2693}
2694
2695
2696/**
2697 * Mark the guest's debug state as inactive.
2698 *
2700 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2701 * @todo This API doesn't make sense any more.
2702 */
2703VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2704{
2705 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2706 NOREF(pVCpu);
2707}
2708
2709
2710/**
2711 * Get the current privilege level of the guest.
2712 *
2713 * @returns CPL
2714 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2715 */
2716VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2717{
2718 /*
2719 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2720 *
2721 * Note! We used to check CS.DPL here, assuming it was always equal to
2722 * CPL even if a conforming segment was loaded. But this turned out to
2723 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2724 * during install after a far call to ring 2 with VT-x. Then on newer
2725 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2726 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2727 *
2728 * So, forget CS.DPL, always use SS.DPL.
2729 *
2730 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2731 * isn't necessarily equal if the segment is conforming.
2732 * See section 4.11.1 in the AMD manual.
2733 *
2734 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2735 * right after real->prot mode switch and when in V8086 mode? That
2736 * section says the RPL specified in a direct transfer (call, jmp,
2737 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2738 * it would be impossible for an exception handle or the iret
2739 * instruction to figure out whether SS:ESP are part of the frame
2740 * or not. VBox or qemu bug must've lead to this misconception.
2741 *
2742 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2743 * selector into SS with an RPL other than the CPL when CPL != 3 and
2744 * we're in 64-bit mode. The Intel dev box doesn't allow this; it insists
2745 * on RPL = CPL. Weird.
2746 */
2747 uint32_t uCpl;
2748 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2749 {
2750 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2751 {
2752 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2753 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2754 else
2755 {
2756 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2757#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2758# ifdef VBOX_WITH_RAW_RING1
2759 if (pVCpu->cpum.s.fRawEntered)
2760 {
2761 if ( uCpl == 2
2762 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2763 uCpl = 1;
2764 else if (uCpl == 1)
2765 uCpl = 0;
2766 }
2767 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2768# else
2769 if (uCpl == 1)
2770 uCpl = 0;
2771# endif
2772#endif
2773 }
2774 }
2775 else
2776 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2777 }
2778 else
2779 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2780 return uCpl;
2781}
2782
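/* Editor's illustrative sketch (not part of the original source, compiled out):
   a typical caller consults CPUMGetGuestCPL before emulating a privileged
   operation; cpumExampleMayEmulatePrivileged is a made-up helper name. */
#if 0
static bool cpumExampleMayEmulatePrivileged(PVMCPU pVCpu)
{
    /* Privileged instructions require CPL 0 (real mode reports CPL 0 as well). */
    return CPUMGetGuestCPL(pVCpu) == 0;
}
#endif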
2783
2784/**
2785 * Gets the current guest CPU mode.
2786 *
2787 * If paging mode is what you need, check out PGMGetGuestMode().
2788 *
2789 * @returns The CPU mode.
2790 * @param pVCpu The cross context virtual CPU structure.
2791 */
2792VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2793{
2794 CPUMMODE enmMode;
2795 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2796 enmMode = CPUMMODE_REAL;
2797 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2798 enmMode = CPUMMODE_PROTECTED;
2799 else
2800 enmMode = CPUMMODE_LONG;
2801
2802 return enmMode;
2803}
2804
2805
2806/**
2807 * Figures out whether the guest CPU is currently executing 16-, 32- or 64-bit code.
2808 *
2809 * @returns 16, 32 or 64.
2810 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2811 */
2812VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2813{
2814 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2815 return 16;
2816
2817 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2818 {
2819 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2820 return 16;
2821 }
2822
2823 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2824 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2825 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2826 return 64;
2827
2828 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2829 return 32;
2830
2831 return 16;
2832}
2833
2834
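/**
 * Gets the disassembler CPU mode corresponding to the current guest code
 * segment and paging state (mirrors CPUMGetGuestCodeBits).
 *
 * @returns DISCPUMODE_16BIT, DISCPUMODE_32BIT or DISCPUMODE_64BIT.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 */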
2835VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2836{
2837 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2838 return DISCPUMODE_16BIT;
2839
2840 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2841 {
2842 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2843 return DISCPUMODE_16BIT;
2844 }
2845
2846 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2847 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2848 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2849 return DISCPUMODE_64BIT;
2850
2851 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2852 return DISCPUMODE_32BIT;
2853
2854 return DISCPUMODE_16BIT;
2855}
2856