VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@61683

Last change on this file since 61683 was 61071, checked in by vboxsync, 9 years ago

VMM/CPUM: comment indent.

1/* $Id: CPUMAllRegs.cpp 61071 2016-05-20 02:58:41Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
53AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
54
55
56/*********************************************************************************************************************************
57* Defined Constants And Macros *
58*********************************************************************************************************************************/
59/**
60 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
61 *
62 * @returns Pointer to the Virtual CPU.
63 * @param a_pGuestCtx Pointer to the guest context.
64 */
65#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
66
67/**
68 * Lazily loads the hidden parts of a selector register when using raw-mode.
69 */
70#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
71# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
72 do \
73 { \
74 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
75 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
76 } while (0)
77#else
78# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
79 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
80#endif
81
82
83
84#ifdef VBOX_WITH_RAW_MODE_NOT_R0
85
86/**
87 * Does the lazy hidden selector register loading.
88 *
89 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
90 * @param pSReg The selector register to lazily load hidden parts of.
91 */
92static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
93{
94 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
95 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
96 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
97
98 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
99 {
100 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
101 pSReg->Attr.u = 0;
102 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
103 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
104 pSReg->Attr.n.u2Dpl = 3;
105 pSReg->Attr.n.u1Present = 1;
106 pSReg->u32Limit = 0x0000ffff;
107 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
108 pSReg->ValidSel = pSReg->Sel;
109 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
110 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
111 }
112 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
113 {
114 /* Real mode - leave the limit and flags alone here, at least for now. */
115 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
116 pSReg->ValidSel = pSReg->Sel;
117 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
118 }
119 else
120 {
121 /* Protected mode - get it from the selector descriptor tables. */
122 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
123 {
124 Assert(!CPUMIsGuestInLongMode(pVCpu));
125 pSReg->Sel = 0;
126 pSReg->u64Base = 0;
127 pSReg->u32Limit = 0;
128 pSReg->Attr.u = 0;
129 pSReg->ValidSel = 0;
130 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
131 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
132 }
133 else
134 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
135 }
136}
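
/*
 * Illustrative note (editorial sketch, not part of the original file): in the
 * V8086 and real-mode paths above, the hidden base is simply the selector
 * value shifted left by four bits, per the 8086 segmentation rules.  For
 * example:
 *
 * @code
 *    pSReg->Sel     = 0x1234;
 *    pSReg->u64Base = (uint32_t)pSReg->Sel << 4;   // == 0x00012340
 * @endcode
 */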
137
138
139/**
140 * Makes sure the hidden CS and SS selector registers are valid, loading them if
141 * necessary.
142 *
143 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
144 */
145VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
146{
147 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
148 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
149}
150
151
152/**
153 * Loads the hidden parts of the given selector register.
154 *
155 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
156 */
157VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
158{
159 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
160}
161
162#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
163
164
165/**
166 * Obsolete.
167 *
168 * We don't support nested hypervisor context interrupts or traps. Life is much
169 * simpler when we don't. It's also slightly faster at times.
170 *
171 * @param pVCpu The cross context virtual CPU structure.
172 */
173VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
174{
175 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
176}
177
178
179/**
180 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
181 *
182 * @param pVCpu The cross context virtual CPU structure.
183 */
184VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
185{
186 return &pVCpu->cpum.s.Hyper;
187}
188
189
190VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
191{
192 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
193 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
194}
195
196
197VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
198{
199 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
200 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
201}
202
203
204VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
205{
206 pVCpu->cpum.s.Hyper.cr3 = cr3;
207
208#ifdef IN_RC
209 /* Update the current CR3. */
210 ASMSetCR3(cr3);
211#endif
212}
213
214VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
215{
216 return pVCpu->cpum.s.Hyper.cr3;
217}
218
219
220VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
221{
222 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
223}
224
225
226VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
227{
228 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
229}
230
231
232VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
233{
234 pVCpu->cpum.s.Hyper.es.Sel = SelES;
235}
236
237
238VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
239{
240 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
241}
242
243
244VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
245{
246 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
247}
248
249
250VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
251{
252 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
253}
254
255
256VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
257{
258 pVCpu->cpum.s.Hyper.esp = u32ESP;
259}
260
261
262VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
263{
264 pVCpu->cpum.s.Hyper.edx = u32EDX;
265}
266
267
268VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
269{
270 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
271 return VINF_SUCCESS;
272}
273
274
275VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
276{
277 pVCpu->cpum.s.Hyper.eip = u32EIP;
278}
279
280
281/**
282 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
283 * EFLAGS and EIP prior to resuming guest execution.
284 *
285 * All general registers not given as parameters will be set to 0. The EFLAGS
286 * register will be set to sane values for C/C++ code execution with interrupts
287 * disabled and IOPL 0.
288 *
289 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
290 * @param u32EIP The EIP value.
291 * @param u32ESP The ESP value.
292 * @param u32EAX The EAX value.
293 * @param u32EDX The EDX value.
294 */
295VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
296{
297 pVCpu->cpum.s.Hyper.eip = u32EIP;
298 pVCpu->cpum.s.Hyper.esp = u32ESP;
299 pVCpu->cpum.s.Hyper.eax = u32EAX;
300 pVCpu->cpum.s.Hyper.edx = u32EDX;
301 pVCpu->cpum.s.Hyper.ecx = 0;
302 pVCpu->cpum.s.Hyper.ebx = 0;
303 pVCpu->cpum.s.Hyper.ebp = 0;
304 pVCpu->cpum.s.Hyper.esi = 0;
305 pVCpu->cpum.s.Hyper.edi = 0;
306 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
307}
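
/*
 * Illustrative usage sketch (editorial addition): a raw-mode run loop such as
 * VMMR3RawRunGC would typically reset the hypervisor context like this before
 * resuming guest execution.  The entry point and stack values below are
 * hypothetical placeholders, not actual VMM symbols.
 *
 * @code
 *    CPUMSetHyperState(pVCpu,
 *                      GCPtrEntry,     // hypothetical EIP of the entry code
 *                      GCPtrStackTop,  // hypothetical hypervisor stack top
 *                      0,              // EAX
 *                      0);             // EDX
 * @endcode
 */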
308
309
310VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
311{
312 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
313}
314
315
316VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
317{
318 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
319}
320
321
322/** @def MAYBE_LOAD_DRx
323 * Macro for updating DRx values in raw-mode and ring-0 contexts.
324 */
325#ifdef IN_RING0
326# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
327# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
328 do { \
329 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
330 a_fnLoad(a_uValue); \
331 else \
332 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
333 } while (0)
334# else
335# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
336 do { \
337 a_fnLoad(a_uValue); \
338 } while (0)
339# endif
340
341#elif defined(IN_RC)
342# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
343 do { \
344 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
345 { a_fnLoad(a_uValue); } \
346 } while (0)
347
348#else
349# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
350#endif
351
352VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
353{
354 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
355 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
356}
357
358
359VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
360{
361 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
362 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
363}
364
365
366VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
367{
368 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
369 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
370}
371
372
373VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
374{
375 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
376 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
377}
378
379
380VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
381{
382 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
383}
384
385
386VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
387{
388 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
389#ifdef IN_RC
390 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
391#endif
392}
393
394
395VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
396{
397 return pVCpu->cpum.s.Hyper.cs.Sel;
398}
399
400
401VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
402{
403 return pVCpu->cpum.s.Hyper.ds.Sel;
404}
405
406
407VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
408{
409 return pVCpu->cpum.s.Hyper.es.Sel;
410}
411
412
413VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
414{
415 return pVCpu->cpum.s.Hyper.fs.Sel;
416}
417
418
419VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
420{
421 return pVCpu->cpum.s.Hyper.gs.Sel;
422}
423
424
425VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
426{
427 return pVCpu->cpum.s.Hyper.ss.Sel;
428}
429
430
431VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
432{
433 return pVCpu->cpum.s.Hyper.eax;
434}
435
436
437VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
438{
439 return pVCpu->cpum.s.Hyper.ebx;
440}
441
442
443VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
444{
445 return pVCpu->cpum.s.Hyper.ecx;
446}
447
448
449VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
450{
451 return pVCpu->cpum.s.Hyper.edx;
452}
453
454
455VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
456{
457 return pVCpu->cpum.s.Hyper.esi;
458}
459
460
461VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
462{
463 return pVCpu->cpum.s.Hyper.edi;
464}
465
466
467VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
468{
469 return pVCpu->cpum.s.Hyper.ebp;
470}
471
472
473VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
474{
475 return pVCpu->cpum.s.Hyper.esp;
476}
477
478
479VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
480{
481 return pVCpu->cpum.s.Hyper.eflags.u32;
482}
483
484
485VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
486{
487 return pVCpu->cpum.s.Hyper.eip;
488}
489
490
491VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
492{
493 return pVCpu->cpum.s.Hyper.rip;
494}
495
496
497VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
498{
499 if (pcbLimit)
500 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
501 return pVCpu->cpum.s.Hyper.idtr.pIdt;
502}
503
504
505VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
506{
507 if (pcbLimit)
508 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
509 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
510}
511
512
513VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
514{
515 return pVCpu->cpum.s.Hyper.ldtr.Sel;
516}
517
518
519VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
520{
521 return pVCpu->cpum.s.Hyper.dr[0];
522}
523
524
525VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
526{
527 return pVCpu->cpum.s.Hyper.dr[1];
528}
529
530
531VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
532{
533 return pVCpu->cpum.s.Hyper.dr[2];
534}
535
536
537VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
538{
539 return pVCpu->cpum.s.Hyper.dr[3];
540}
541
542
543VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
544{
545 return pVCpu->cpum.s.Hyper.dr[6];
546}
547
548
549VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
550{
551 return pVCpu->cpum.s.Hyper.dr[7];
552}
553
554
555/**
556 * Gets the pointer to the internal CPUMCTXCORE structure.
557 * This is only for reading in order to save a few calls.
558 *
559 * @param pVCpu The cross context virtual CPU structure.
560 */
561VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
562{
563 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
564}
565
566
567/**
568 * Queries the pointer to the internal CPUMCTX structure.
569 *
570 * @returns The CPUMCTX pointer.
571 * @param pVCpu The cross context virtual CPU structure.
572 */
573VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
574{
575 return &pVCpu->cpum.s.Guest;
576}
577
578VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
579{
580#ifdef VBOX_WITH_RAW_MODE_NOT_R0
581 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
582 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
583#endif
584 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
585 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
586 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
587 return VINF_SUCCESS; /* formality, consider it void. */
588}
589
590VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
591{
592#ifdef VBOX_WITH_RAW_MODE_NOT_R0
593 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
594 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
595#endif
596 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
597 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
598 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
599 return VINF_SUCCESS; /* formality, consider it void. */
600}
601
602VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
603{
604#ifdef VBOX_WITH_RAW_MODE_NOT_R0
605 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
606 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
607#endif
608 pVCpu->cpum.s.Guest.tr.Sel = tr;
609 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
610 return VINF_SUCCESS; /* formality, consider it void. */
611}
612
613VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
614{
615#ifdef VBOX_WITH_RAW_MODE_NOT_R0
616 if ( ( ldtr != 0
617 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
618 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
619 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
620#endif
621 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
622 /* The caller will set more hidden bits if it has them. */
623 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
624 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
625 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
626 return VINF_SUCCESS; /* formality, consider it void. */
627}
628
629
630/**
631 * Set the guest CR0.
632 *
633 * When called in GC, the hyper CR0 may be updated if that is
634 * required. The caller only has to take special action if AM,
635 * WP, PG or PE changes.
636 *
637 * @returns VINF_SUCCESS (consider it void).
638 * @param pVCpu The cross context virtual CPU structure.
639 * @param cr0 The new CR0 value.
640 */
641VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
642{
643#ifdef IN_RC
644 /*
645 * Check if we need to change hypervisor CR0 because
646 * of math stuff.
647 */
648 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
649 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
650 {
651 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
652 {
653 /*
654 * We haven't loaded the guest FPU state yet, so TS and MP are both set
655 * and EM should be reflecting the guest EM (it always does this).
656 */
657 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
658 {
659 uint32_t HyperCR0 = ASMGetCR0();
660 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
661 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
662 HyperCR0 &= ~X86_CR0_EM;
663 HyperCR0 |= cr0 & X86_CR0_EM;
664 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
665 ASMSetCR0(HyperCR0);
666 }
667# ifdef VBOX_STRICT
668 else
669 {
670 uint32_t HyperCR0 = ASMGetCR0();
671 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
672 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
673 }
674# endif
675 }
676 else
677 {
678 /*
679 * Already loaded the guest FPU state, so we're just mirroring
680 * the guest flags.
681 */
682 uint32_t HyperCR0 = ASMGetCR0();
683 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
684 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
685 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
686 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
687 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
688 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
689 ASMSetCR0(HyperCR0);
690 }
691 }
692#endif /* IN_RC */
693
694 /*
695 * Check for changes causing TLB flushes (for REM).
696 * The caller is responsible for calling PGM when appropriate.
697 */
698 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
699 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
700 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
701 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
702
703 /*
704 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
705 */
706 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
707 PGMCr0WpEnabled(pVCpu);
708
709 /* The ET flag is settable on a 386 and hardwired on 486+. */
710 if ( !(cr0 & X86_CR0_ET)
711 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
712 cr0 |= X86_CR0_ET;
713
714 pVCpu->cpum.s.Guest.cr0 = cr0;
715 return VINF_SUCCESS;
716}
717
718
719VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
720{
721 pVCpu->cpum.s.Guest.cr2 = cr2;
722 return VINF_SUCCESS;
723}
724
725
726VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
727{
728 pVCpu->cpum.s.Guest.cr3 = cr3;
729 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
735{
736 /*
737 * The CR4.OSXSAVE bit is reflected in CPUID(1).ECX[27].
738 */
739 if ( (cr4 & X86_CR4_OSXSAVE)
740 != (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE) )
741 {
742 PVM pVM = pVCpu->CTX_SUFF(pVM);
743 if (cr4 & X86_CR4_OSXSAVE)
744 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
745 else
746 CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
747 }
748
749 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
750 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
751 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
752
753 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
754 pVCpu->cpum.s.Guest.cr4 = cr4;
755 return VINF_SUCCESS;
756}
757
758
759VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
760{
761 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
762 return VINF_SUCCESS;
763}
764
765
766VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
767{
768 pVCpu->cpum.s.Guest.eip = eip;
769 return VINF_SUCCESS;
770}
771
772
773VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
774{
775 pVCpu->cpum.s.Guest.eax = eax;
776 return VINF_SUCCESS;
777}
778
779
780VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
781{
782 pVCpu->cpum.s.Guest.ebx = ebx;
783 return VINF_SUCCESS;
784}
785
786
787VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
788{
789 pVCpu->cpum.s.Guest.ecx = ecx;
790 return VINF_SUCCESS;
791}
792
793
794VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
795{
796 pVCpu->cpum.s.Guest.edx = edx;
797 return VINF_SUCCESS;
798}
799
800
801VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
802{
803 pVCpu->cpum.s.Guest.esp = esp;
804 return VINF_SUCCESS;
805}
806
807
808VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
809{
810 pVCpu->cpum.s.Guest.ebp = ebp;
811 return VINF_SUCCESS;
812}
813
814
815VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
816{
817 pVCpu->cpum.s.Guest.esi = esi;
818 return VINF_SUCCESS;
819}
820
821
822VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
823{
824 pVCpu->cpum.s.Guest.edi = edi;
825 return VINF_SUCCESS;
826}
827
828
829VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
830{
831 pVCpu->cpum.s.Guest.ss.Sel = ss;
832 return VINF_SUCCESS;
833}
834
835
836VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
837{
838 pVCpu->cpum.s.Guest.cs.Sel = cs;
839 return VINF_SUCCESS;
840}
841
842
843VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
844{
845 pVCpu->cpum.s.Guest.ds.Sel = ds;
846 return VINF_SUCCESS;
847}
848
849
850VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
851{
852 pVCpu->cpum.s.Guest.es.Sel = es;
853 return VINF_SUCCESS;
854}
855
856
857VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
858{
859 pVCpu->cpum.s.Guest.fs.Sel = fs;
860 return VINF_SUCCESS;
861}
862
863
864VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
865{
866 pVCpu->cpum.s.Guest.gs.Sel = gs;
867 return VINF_SUCCESS;
868}
869
870
871VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
872{
873 pVCpu->cpum.s.Guest.msrEFER = val;
874}
875
876
877VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
878{
879 if (pcbLimit)
880 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
881 return pVCpu->cpum.s.Guest.idtr.pIdt;
882}
883
884
885VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
886{
887 if (pHidden)
888 *pHidden = pVCpu->cpum.s.Guest.tr;
889 return pVCpu->cpum.s.Guest.tr.Sel;
890}
891
892
893VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
894{
895 return pVCpu->cpum.s.Guest.cs.Sel;
896}
897
898
899VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
900{
901 return pVCpu->cpum.s.Guest.ds.Sel;
902}
903
904
905VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
906{
907 return pVCpu->cpum.s.Guest.es.Sel;
908}
909
910
911VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
912{
913 return pVCpu->cpum.s.Guest.fs.Sel;
914}
915
916
917VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
918{
919 return pVCpu->cpum.s.Guest.gs.Sel;
920}
921
922
923VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
924{
925 return pVCpu->cpum.s.Guest.ss.Sel;
926}
927
928
929VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
930{
931 return pVCpu->cpum.s.Guest.ldtr.Sel;
932}
933
934
935VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
936{
937 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
938 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
939 return pVCpu->cpum.s.Guest.ldtr.Sel;
940}
941
942
943VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
944{
945 return pVCpu->cpum.s.Guest.cr0;
946}
947
948
949VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
950{
951 return pVCpu->cpum.s.Guest.cr2;
952}
953
954
955VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
956{
957 return pVCpu->cpum.s.Guest.cr3;
958}
959
960
961VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
962{
963 return pVCpu->cpum.s.Guest.cr4;
964}
965
966
967VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
968{
969 uint64_t u64;
970 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
971 if (RT_FAILURE(rc))
972 u64 = 0;
973 return u64;
974}
975
976
977VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
978{
979 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
980}
981
982
983VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
984{
985 return pVCpu->cpum.s.Guest.eip;
986}
987
988
989VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
990{
991 return pVCpu->cpum.s.Guest.rip;
992}
993
994
995VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
996{
997 return pVCpu->cpum.s.Guest.eax;
998}
999
1000
1001VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1002{
1003 return pVCpu->cpum.s.Guest.ebx;
1004}
1005
1006
1007VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1008{
1009 return pVCpu->cpum.s.Guest.ecx;
1010}
1011
1012
1013VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1014{
1015 return pVCpu->cpum.s.Guest.edx;
1016}
1017
1018
1019VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1020{
1021 return pVCpu->cpum.s.Guest.esi;
1022}
1023
1024
1025VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1026{
1027 return pVCpu->cpum.s.Guest.edi;
1028}
1029
1030
1031VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1032{
1033 return pVCpu->cpum.s.Guest.esp;
1034}
1035
1036
1037VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1038{
1039 return pVCpu->cpum.s.Guest.ebp;
1040}
1041
1042
1043VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1044{
1045 return pVCpu->cpum.s.Guest.eflags.u32;
1046}
1047
1048
1049VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1050{
1051 switch (iReg)
1052 {
1053 case DISCREG_CR0:
1054 *pValue = pVCpu->cpum.s.Guest.cr0;
1055 break;
1056
1057 case DISCREG_CR2:
1058 *pValue = pVCpu->cpum.s.Guest.cr2;
1059 break;
1060
1061 case DISCREG_CR3:
1062 *pValue = pVCpu->cpum.s.Guest.cr3;
1063 break;
1064
1065 case DISCREG_CR4:
1066 *pValue = pVCpu->cpum.s.Guest.cr4;
1067 break;
1068
1069 case DISCREG_CR8:
1070 {
1071 uint8_t u8Tpr;
1072 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1073 if (RT_FAILURE(rc))
1074 {
1075 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1076 *pValue = 0;
1077 return rc;
1078 }
1079 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes in CR8; bits 3-0 are ignored. */
1080 break;
1081 }
1082
1083 default:
1084 return VERR_INVALID_PARAMETER;
1085 }
1086 return VINF_SUCCESS;
1087}
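
/*
 * Editorial note: for the DISCREG_CR8 case above, CR8 holds the task priority
 * class, i.e. the top four bits of the 8-bit APIC TPR.  A worked example,
 * assuming the APIC reports TPR = 0x80:
 *
 * @code
 *    uint64_t uCr8;
 *    int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &uCr8);
 *    // On success: uCr8 == 0x80 >> 4 == 8.
 * @endcode
 */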
1088
1089
1090VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1091{
1092 return pVCpu->cpum.s.Guest.dr[0];
1093}
1094
1095
1096VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1097{
1098 return pVCpu->cpum.s.Guest.dr[1];
1099}
1100
1101
1102VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1103{
1104 return pVCpu->cpum.s.Guest.dr[2];
1105}
1106
1107
1108VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1109{
1110 return pVCpu->cpum.s.Guest.dr[3];
1111}
1112
1113
1114VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1115{
1116 return pVCpu->cpum.s.Guest.dr[6];
1117}
1118
1119
1120VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1121{
1122 return pVCpu->cpum.s.Guest.dr[7];
1123}
1124
1125
1126VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1127{
1128 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1129 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1130 if (iReg == 4 || iReg == 5)
1131 iReg += 2;
1132 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1133 return VINF_SUCCESS;
1134}
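
/*
 * Editorial note: the DR4/DR5 aliasing above means requests for those
 * registers are transparently served from DR6/DR7, mirroring CPU behaviour
 * when CR4.DE is clear.  A small sketch:
 *
 * @code
 *    uint64_t uValue;
 *    CPUMGetGuestDRx(pVCpu, 5, &uValue);   // yields the guest DR7 value
 * @endcode
 */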
1135
1136
1137VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1138{
1139 return pVCpu->cpum.s.Guest.msrEFER;
1140}
1141
1142
1143/**
1144 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1145 *
1146 * @returns Pointer to the leaf if found, NULL if not.
1147 *
1148 * @param pVM The cross context VM structure.
1149 * @param uLeaf The leaf to get.
1150 */
1151PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1152{
1153 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1154 if (iEnd)
1155 {
1156 unsigned iStart = 0;
1157 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1158 for (;;)
1159 {
1160 unsigned i = iStart + (iEnd - iStart) / 2U;
1161 if (uLeaf < paLeaves[i].uLeaf)
1162 {
1163 if (i <= iStart)
1164 return NULL;
1165 iEnd = i;
1166 }
1167 else if (uLeaf > paLeaves[i].uLeaf)
1168 {
1169 i += 1;
1170 if (i >= iEnd)
1171 return NULL;
1172 iStart = i;
1173 }
1174 else
1175 {
1176 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1177 return &paLeaves[i];
1178
1179 /* This shouldn't normally happen. But in case it does due
1180 to user configuration overrides or something, just return the
1181 first sub-leaf. */
1182 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1183 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1184 while ( paLeaves[i].uSubLeaf != 0
1185 && i > 0
1186 && uLeaf == paLeaves[i - 1].uLeaf)
1187 i--;
1188 return &paLeaves[i];
1189 }
1190 }
1191 }
1192
1193 return NULL;
1194}
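
/*
 * Editorial note: the lookup above is a plain binary search, so it relies on
 * GuestInfo.paCpuIdLeaves being sorted by uLeaf (and by uSubLeaf within a
 * leaf).  A hypothetical three-entry array illustrates the expected hit:
 *
 * @code
 *    // paLeaves[].uLeaf == { 0x00000000, 0x00000001, 0x80000001 }
 *    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
 *    // pLeaf points at the middle entry; NULL is returned for unknown leaves.
 * @endcode
 */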
1195
1196
1197/**
1198 * Looks up a CPUID leaf in the CPUID leaf array.
1199 *
1200 * @returns Pointer to the leaf if found, NULL if not.
1201 *
1202 * @param pVM The cross context VM structure.
1203 * @param uLeaf The leaf to get.
1204 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1205 * isn't.
1206 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1207 */
1208PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1209{
1210 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1211 if (iEnd)
1212 {
1213 unsigned iStart = 0;
1214 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1215 for (;;)
1216 {
1217 unsigned i = iStart + (iEnd - iStart) / 2U;
1218 if (uLeaf < paLeaves[i].uLeaf)
1219 {
1220 if (i <= iStart)
1221 return NULL;
1222 iEnd = i;
1223 }
1224 else if (uLeaf > paLeaves[i].uLeaf)
1225 {
1226 i += 1;
1227 if (i >= iEnd)
1228 return NULL;
1229 iStart = i;
1230 }
1231 else
1232 {
1233 uSubLeaf &= paLeaves[i].fSubLeafMask;
1234 if (uSubLeaf == paLeaves[i].uSubLeaf)
1235 *pfExactSubLeafHit = true;
1236 else
1237 {
1238 /* Find the right subleaf. We return the last one before
1239 uSubLeaf if we don't find an exact match. */
1240 if (uSubLeaf < paLeaves[i].uSubLeaf)
1241 while ( i > 0
1242 && uLeaf == paLeaves[i - 1].uLeaf
1243 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1244 i--;
1245 else
1246 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1247 && uLeaf == paLeaves[i + 1].uLeaf
1248 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1249 i++;
1250 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1251 }
1252 return &paLeaves[i];
1253 }
1254 }
1255 }
1256
1257 *pfExactSubLeafHit = false;
1258 return NULL;
1259}
1260
1261
1262/**
1263 * Gets a CPUID leaf.
1264 *
1265 * @param pVCpu The cross context virtual CPU structure.
1266 * @param uLeaf The CPUID leaf to get.
1267 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1268 * @param pEax Where to store the EAX value.
1269 * @param pEbx Where to store the EBX value.
1270 * @param pEcx Where to store the ECX value.
1271 * @param pEdx Where to store the EDX value.
1272 */
1273VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1274 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1275{
1276 bool fExactSubLeafHit;
1277 PVM pVM = pVCpu->CTX_SUFF(pVM);
1278 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1279 if (pLeaf)
1280 {
1281 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1282 if (fExactSubLeafHit)
1283 {
1284 *pEax = pLeaf->uEax;
1285 *pEbx = pLeaf->uEbx;
1286 *pEcx = pLeaf->uEcx;
1287 *pEdx = pLeaf->uEdx;
1288
1289 /*
1290 * Deal with CPU specific information (currently only APIC ID).
1291 */
1292 if (pLeaf->fFlags & (CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE))
1293 {
1294 if (uLeaf == 1)
1295 {
1296 /* EBX: Bits 31-24: Initial APIC ID. */
1297 Assert(pVCpu->idCpu <= 255);
1298 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1299 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1300
1301 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1302 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1303 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1304 }
1305 else if (uLeaf == 0xb)
1306 {
1307 /* EDX: Initial extended APIC ID. */
1308 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1309 *pEdx = pVCpu->idCpu;
1310 }
1311 else if (uLeaf == UINT32_C(0x8000001e))
1312 {
1313 /* EAX: Initial extended APIC ID. */
1314 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1315 *pEax = pVCpu->idCpu;
1316 }
1317 else
1318 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1319 }
1320 }
1321 /*
1322 * Out-of-range sub-leaves aren't as easy or pretty to emulate,
1323 * but we do the best we can here...
1324 */
1325 else
1326 {
1327 *pEax = *pEbx = *pEcx = *pEdx = 0;
1328 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1329 {
1330 *pEcx = uSubLeaf & 0xff;
1331 *pEdx = pVCpu->idCpu;
1332 }
1333 }
1334 }
1335 else
1336 {
1337 /*
1338 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1339 */
1340 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1341 {
1342 default:
1343 AssertFailed();
1344 case CPUMUNKNOWNCPUID_DEFAULTS:
1345 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1346 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1347 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1348 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1349 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1350 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1351 break;
1352 case CPUMUNKNOWNCPUID_PASSTHRU:
1353 *pEax = uLeaf;
1354 *pEbx = 0;
1355 *pEcx = uSubLeaf;
1356 *pEdx = 0;
1357 break;
1358 }
1359 }
1360 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1361}
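
/*
 * Illustrative usage sketch (editorial addition): an instruction emulator
 * could service a guest CPUID roughly along these lines, taking the leaf from
 * EAX and the sub-leaf from ECX and writing the four registers back.  This is
 * a minimal sketch, not the actual IEM/EM code.
 *
 * @code
 *    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
 *    uint32_t uEax, uEbx, uEcx, uEdx;
 *    CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &uEax, &uEbx, &uEcx, &uEdx);
 *    pCtx->rax = uEax; pCtx->rbx = uEbx; pCtx->rcx = uEcx; pCtx->rdx = uEdx;
 * @endcode
 */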
1362
1363
1364/**
1365 * Sets a CPUID feature bit.
1366 *
1367 * @param pVM The cross context VM structure.
1368 * @param enmFeature The feature to set.
1369 */
1370VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1371{
1372 PCPUMCPUIDLEAF pLeaf;
1373
1374 switch (enmFeature)
1375 {
1376 /*
1377 * Set the APIC bit in both feature masks.
1378 */
1379 case CPUMCPUIDFEATURE_APIC:
1380 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1381 if (pLeaf)
1382 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
1383
1384 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1385 if ( pLeaf
1386 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1387 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
1388
1389 pVM->cpum.s.GuestFeatures.fApic = 1;
1390 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled xAPIC\n"));
1391 break;
1392
1393 /*
1394 * Set the x2APIC bit in the standard feature mask.
1395 */
1396 case CPUMCPUIDFEATURE_X2APIC:
1397 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1398 if (pLeaf)
1399 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
1400 pVM->cpum.s.GuestFeatures.fX2Apic = 1;
1401 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
1402 break;
1403
1404 /*
1405 * Set the sysenter/sysexit bit in the standard feature mask.
1406 * Assumes the caller knows what it's doing! (host must support these)
1407 */
1408 case CPUMCPUIDFEATURE_SEP:
1409 if (!pVM->cpum.s.HostFeatures.fSysEnter)
1410 {
1411 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
1412 return;
1413 }
1414
1415 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1416 if (pLeaf)
1417 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
1418 pVM->cpum.s.GuestFeatures.fSysEnter = 1;
1419 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
1420 break;
1421
1422 /*
1423 * Set the syscall/sysret bit in the extended feature mask.
1424 * Assumes the caller knows what it's doing! (host must support these)
1425 */
1426 case CPUMCPUIDFEATURE_SYSCALL:
1427 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1428 if ( !pLeaf
1429 || !pVM->cpum.s.HostFeatures.fSysCall)
1430 {
1431#if HC_ARCH_BITS == 32
1432 /* Intel seems not to set X86_CPUID_EXT_FEATURE_EDX_SYSCALL in 32-bit
1433 mode, even when the CPU is capable of doing so in 64-bit mode.
1434 Long mode requires syscall support. */
1435 if (!pVM->cpum.s.HostFeatures.fLongMode)
1436#endif
1437 {
1438 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
1439 return;
1440 }
1441 }
1442
1443 /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
1444 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
1445 pVM->cpum.s.GuestFeatures.fSysCall = 1;
1446 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
1447 break;
1448
1449 /*
1450 * Set the PAE bit in both feature masks.
1451 * Assumes the caller knows what it's doing! (host must support these)
1452 */
1453 case CPUMCPUIDFEATURE_PAE:
1454 if (!pVM->cpum.s.HostFeatures.fPae)
1455 {
1456 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
1457 return;
1458 }
1459
1460 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1461 if (pLeaf)
1462 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
1463
1464 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1465 if ( pLeaf
1466 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1467 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
1468
1469 pVM->cpum.s.GuestFeatures.fPae = 1;
1470 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
1471 break;
1472
1473 /*
1474 * Set the LONG MODE bit in the extended feature mask.
1475 * Assumes the caller knows what it's doing! (host must support these)
1476 */
1477 case CPUMCPUIDFEATURE_LONG_MODE:
1478 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1479 if ( !pLeaf
1480 || !pVM->cpum.s.HostFeatures.fLongMode)
1481 {
1482 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
1483 return;
1484 }
1485
1486 /* Valid for both Intel and AMD. */
1487 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1488 pVM->cpum.s.GuestFeatures.fLongMode = 1;
1489 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
1490 break;
1491
1492 /*
1493 * Set the NX/XD bit in the extended feature mask.
1494 * Assumes the caller knows what it's doing! (host must support these)
1495 */
1496 case CPUMCPUIDFEATURE_NX:
1497 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1498 if ( !pLeaf
1499 || !pVM->cpum.s.HostFeatures.fNoExecute)
1500 {
1501 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
1502 return;
1503 }
1504
1505 /* Valid for both Intel and AMD. */
1506 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
1507 pVM->cpum.s.GuestFeatures.fNoExecute = 1;
1508 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
1509 break;
1510
1511
1512 /*
1513 * Set the LAHF/SAHF support in 64-bit mode.
1514 * Assumes the caller knows what it's doing! (host must support this)
1515 */
1516 case CPUMCPUIDFEATURE_LAHF:
1517 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1518 if ( !pLeaf
1519 || !pVM->cpum.s.HostFeatures.fLahfSahf)
1520 {
1521 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
1522 return;
1523 }
1524
1525 /* Valid for both Intel and AMD. */
1526 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1527 pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
1528 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
1529 break;
1530
1531 /*
1532 * Set the page attribute table bit. This is an alternative page-level
1533 * cache control that doesn't matter much when everything is
1534 * virtualized, though it may when passing through device memory.
1535 */
1536 case CPUMCPUIDFEATURE_PAT:
1537 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1538 if (pLeaf)
1539 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
1540
1541 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1542 if ( pLeaf
1543 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1544 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
1545
1546 pVM->cpum.s.GuestFeatures.fPat = 1;
1547 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
1548 break;
1549
1550 /*
1551 * Set the RDTSCP support bit.
1552 * Assumes the caller knows what it's doing! (host must support this)
1553 */
1554 case CPUMCPUIDFEATURE_RDTSCP:
1555 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1556 if ( !pLeaf
1557 || !pVM->cpum.s.HostFeatures.fRdTscP
1558 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
1559 {
1560 if (!pVM->cpum.s.u8PortableCpuIdLevel)
1561 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
1562 return;
1563 }
1564
1565 /* Valid for both Intel and AMD. */
1566 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1567 pVM->cpum.s.GuestFeatures.fRdTscP = 1;
1568 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
1569 break;
1570
1571 /*
1572 * Set the Hypervisor Present bit in the standard feature mask.
1573 */
1574 case CPUMCPUIDFEATURE_HVP:
1575 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1576 if (pLeaf)
1577 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
1578 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
1579 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
1580 break;
1581
1582 /*
1583 * Set the MWAIT Extensions Present bit in the MWAIT/MONITOR leaf.
1584 * This currently includes the Present bit and MWAITBREAK bit as well.
1585 */
1586 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1587 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005));
1588 if ( !pLeaf
1589 || !pVM->cpum.s.HostFeatures.fMWaitExtensions)
1590 {
1591 LogRel(("CPUM: WARNING! Can't turn on MWAIT Extensions when the host doesn't support it!\n"));
1592 return;
1593 }
1594
1595 /* Valid for both Intel and AMD. */
1596 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
1597 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 1;
1598 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled MWAIT Extensions.\n"));
1599 break;
1600
1601 /*
1602 * OSXSAVE - only used from CPUMSetGuestCR4.
1603 */
1604 case CPUMCPUIDFEATURE_OSXSAVE:
1605 AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
1606
1607 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1608 AssertLogRelReturnVoid(pLeaf);
1609
1610 /* UNI: Special case for single CPU to make life simple for CPUMPatchHlpCpuId. */
1611 if (pVM->cCpus == 1)
1612 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_OSXSAVE;
1613 /* SMP: Set flag indicating OSXSAVE updating (superfluous because of the APIC ID, but that's fine). */
1614 else
1615 ASMAtomicOrU32(&pLeaf->fFlags, CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE);
1616 break;
1617
1618 default:
1619 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1620 break;
1621 }
1622
1623 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1624 {
1625 PVMCPU pVCpu = &pVM->aCpus[i];
1626 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1627 }
1628}
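
/*
 * Illustrative usage sketch (editorial addition): ring-3 initialization code
 * could expose features to the guest with calls like the ones below, provided
 * the host supports them (the function logs a warning and bails out
 * otherwise).
 *
 * @code
 *    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
 *    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
 * @endcode
 */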
1629
1630
1631/**
1632 * Queries a CPUID feature bit.
1633 *
1634 * @returns boolean for feature presence
1635 * @param pVM The cross context VM structure.
1636 * @param enmFeature The feature to query.
1637 */
1638VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1639{
1640 switch (enmFeature)
1641 {
1642 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
1643 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
1644 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
1645 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
1646 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
1647 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
1648 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
1649 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
1650 case CPUMCPUIDFEATURE_PAT: return pVM->cpum.s.GuestFeatures.fPat;
1651 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
1652 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
1653 case CPUMCPUIDFEATURE_MWAIT_EXTS: return pVM->cpum.s.GuestFeatures.fMWaitExtensions;
1654
1655 case CPUMCPUIDFEATURE_OSXSAVE:
1656 case CPUMCPUIDFEATURE_INVALID:
1657 case CPUMCPUIDFEATURE_32BIT_HACK:
1658 break;
1659 }
1660 AssertFailed();
1661 return false;
1662}
1663
1664
1665/**
1666 * Clears a CPUID feature bit.
1667 *
1668 * @param pVM The cross context VM structure.
1669 * @param enmFeature The feature to clear.
1670 */
1671VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1672{
1673 PCPUMCPUIDLEAF pLeaf;
1674 switch (enmFeature)
1675 {
1676 case CPUMCPUIDFEATURE_APIC:
1677 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1678 if (pLeaf)
1679 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1680
1681 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1682 if ( pLeaf
1683 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1684 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1685
1686 pVM->cpum.s.GuestFeatures.fApic = 0;
1687 Log(("CPUM: ClearGuestCpuIdFeature: Disabled xAPIC\n"));
1688 break;
1689
1690 case CPUMCPUIDFEATURE_X2APIC:
1691 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1692 if (pLeaf)
1693 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
1694 pVM->cpum.s.GuestFeatures.fX2Apic = 0;
1695 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
1696 break;
1697
1698 case CPUMCPUIDFEATURE_PAE:
1699 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1700 if (pLeaf)
1701 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
1702
1703 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1704 if ( pLeaf
1705 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1706 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
1707
1708 pVM->cpum.s.GuestFeatures.fPae = 0;
1709 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
1710 break;
1711
1712 case CPUMCPUIDFEATURE_PAT:
1713 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1714 if (pLeaf)
1715 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
1716
1717 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1718 if ( pLeaf
1719 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
1720 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
1721
1722 pVM->cpum.s.GuestFeatures.fPat = 0;
1723 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
1724 break;
1725
1726 case CPUMCPUIDFEATURE_LONG_MODE:
1727 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1728 if (pLeaf)
1729 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
1730 pVM->cpum.s.GuestFeatures.fLongMode = 0;
1731 break;
1732
1733 case CPUMCPUIDFEATURE_LAHF:
1734 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1735 if (pLeaf)
1736 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
1737 pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
1738 break;
1739
1740 case CPUMCPUIDFEATURE_RDTSCP:
1741 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1742 if (pLeaf)
1743 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
1744 pVM->cpum.s.GuestFeatures.fRdTscP = 0;
1745 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
1746 break;
1747
1748 case CPUMCPUIDFEATURE_HVP:
1749 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1750 if (pLeaf)
1751 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
1752 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
1753 break;
1754
1755 case CPUMCPUIDFEATURE_MWAIT_EXTS:
1756 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005));
1757 if (pLeaf)
1758 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
1759 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 0;
1760 Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
1761 break;
1762
1763 /*
1764 * OSXSAVE - only used from CPUMSetGuestCR4.
1765 */
1766 case CPUMCPUIDFEATURE_OSXSAVE:
1767 AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
1768
1769 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1770 AssertLogRelReturnVoid(pLeaf);
1771
1772 /* UNI: Special case for single CPU to make life easy for CPUMPatchHlpCpuId. */
1773 if (pVM->cCpus == 1)
1774 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_OSXSAVE;
1775 /* else: SMP: We never set the OSXSAVE bit and leaving the CONTAINS_OSXSAVE flag is fine. */
1776 break;
1777
1778
1779 default:
1780 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1781 break;
1782 }
1783
1784 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1785 {
1786 PVMCPU pVCpu = &pVM->aCpus[i];
1787 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
1788 }
1789}
1790
1791
1792/**
1793 * Gets the host CPU vendor.
1794 *
1795 * @returns CPU vendor.
1796 * @param pVM The cross context VM structure.
1797 */
1798VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1799{
1800 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1801}
1802
1803
1804/**
1805 * Gets the CPU vendor.
1806 *
1807 * @returns CPU vendor.
1808 * @param pVM The cross context VM structure.
1809 */
1810VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1811{
1812 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1813}
1814
1815
1816VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1817{
1818 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1819 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1820}
1821
1822
1823VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1824{
1825 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1826 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1827}
1828
1829
1830VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1831{
1832 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1833 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1834}
1835
1836
1837VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1838{
1839 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1840 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1841}
1842
1843
1844VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1845{
1846 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1847 return VINF_SUCCESS; /* No need to recalc. */
1848}
1849
1850
1851VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1852{
1853 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1854 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1855}
1856
1857
1858VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1859{
1860 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1861 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1862 if (iReg == 4 || iReg == 5)
1863 iReg += 2;
1864 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1865 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1866}
1867
1868
1869/**
1870 * Recalculates the hypervisor DRx register values based on current guest
1871 * registers and DBGF breakpoints, updating changed registers depending on the
1872 * context.
1873 *
1874 * This is called whenever a guest DRx register is modified (any context) and
1875 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1876 *
1877 * In raw-mode context this function will reload any (hyper) DRx registers that
1878 * come out with a different value. It may also have to save the host debug
1879 * registers if that hasn't been done already. In this context though, we'll
1880 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1881 * are only important when breakpoints are actually enabled.
1882 *
1883 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1884 * reloaded by the HM code if it changes. Furthermore, we will only use the
1885 * combined register set when the VBox debugger is actually using hardware BPs,
1886 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1887 * concern us here).
1888 *
1889 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1890 * all the time.
1891 *
1892 * @returns VINF_SUCCESS.
1893 * @param pVCpu The cross context virtual CPU structure.
1894 * @param iGstReg The guest debug register number that was modified.
1895 * UINT8_MAX if not guest register.
1896 * @param fForceHyper Used in HM to force hyper registers because of single
1897 * stepping.
1898 */
1899VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1900{
1901 PVM pVM = pVCpu->CTX_SUFF(pVM);
1902
1903 /*
1904 * Compare the DR7s first.
1905 *
1906 * We only care about the enabled flags. GD is virtualized when we
1907 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1908 * always have the LE and GE bits set, so there is no need to check and
1909 * disable stuff if they're cleared, like we have to for the guest DR7.
1910 */
1911 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1912 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1913 uGstDr7 = 0;
1914 else if (!(uGstDr7 & X86_DR7_LE))
1915 uGstDr7 &= ~X86_DR7_LE_ALL;
1916 else if (!(uGstDr7 & X86_DR7_GE))
1917 uGstDr7 &= ~X86_DR7_GE_ALL;
1918
1919 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1920
1921#ifdef IN_RING0
1922 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1923 fForceHyper = true;
1924#endif
1925 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
1926 {
1927 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1928#ifdef IN_RC
1929 bool const fHmEnabled = false;
1930#elif defined(IN_RING3)
1931 bool const fHmEnabled = HMIsEnabled(pVM);
1932#endif
1933
1934 /*
1935 * Ok, something is enabled. Recalc each of the breakpoints, taking
1936 * the VM debugger ones over the guest ones. In raw-mode context we will
1937 * not allow breakpoints with values inside the hypervisor area.
1938 */
1939 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1940
1941 /* bp 0 */
1942 RTGCUINTREG uNewDr0;
1943 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1944 {
1945 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1946 uNewDr0 = DBGFBpGetDR0(pVM);
1947 }
1948 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1949 {
1950 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1951#ifndef IN_RING0
1952 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1953 uNewDr0 = 0;
1954 else
1955#endif
1956 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1957 }
1958 else
1959 uNewDr0 = 0;
1960
1961 /* bp 1 */
1962 RTGCUINTREG uNewDr1;
1963 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1964 {
1965 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1966 uNewDr1 = DBGFBpGetDR1(pVM);
1967 }
1968 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1969 {
1970 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1971#ifndef IN_RING0
1972 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1973 uNewDr1 = 0;
1974 else
1975#endif
1976 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1977 }
1978 else
1979 uNewDr1 = 0;
1980
1981 /* bp 2 */
1982 RTGCUINTREG uNewDr2;
1983 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1984 {
1985 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1986 uNewDr2 = DBGFBpGetDR2(pVM);
1987 }
1988 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1989 {
1990 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1991#ifndef IN_RING0
1992 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1993 uNewDr2 = 0;
1994 else
1995#endif
1996 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1997 }
1998 else
1999 uNewDr2 = 0;
2000
2001 /* bp 3 */
2002 RTGCUINTREG uNewDr3;
2003 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2004 {
2005 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2006 uNewDr3 = DBGFBpGetDR3(pVM);
2007 }
2008 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2009 {
2010 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2011#ifndef IN_RING0
2012 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
2013 uNewDr3 = 0;
2014 else
2015#endif
2016 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2017 }
2018 else
2019 uNewDr3 = 0;
2020
2021 /*
2022 * Apply the updates.
2023 */
2024#ifdef IN_RC
2025 /* Make sure to save host registers first. */
2026 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
2027 {
2028 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
2029 {
2030 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
2031 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
2032 }
2033 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
2034 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
2035 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
2036 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
2037 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
2038
2039 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
2040 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
2041 ASMSetDR0(uNewDr0);
2042 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
2043 ASMSetDR1(uNewDr1);
2044 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
2045 ASMSetDR2(uNewDr2);
2046 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
2047 ASMSetDR3(uNewDr3);
2048 ASMSetDR6(X86_DR6_INIT_VAL);
2049 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
2050 ASMSetDR7(uNewDr7);
2051 }
2052 else
2053#endif
2054 {
2055 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
2056 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2057 CPUMSetHyperDR3(pVCpu, uNewDr3);
2058 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2059 CPUMSetHyperDR2(pVCpu, uNewDr2);
2060 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2061 CPUMSetHyperDR1(pVCpu, uNewDr1);
2062 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2063 CPUMSetHyperDR0(pVCpu, uNewDr0);
2064 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2065 CPUMSetHyperDR7(pVCpu, uNewDr7);
2066 }
2067 }
2068#ifdef IN_RING0
2069 else if (CPUMIsGuestDebugStateActive(pVCpu))
2070 {
2071 /*
2072 * Reload the register that was modified. Normally this won't happen
2073 * as we won't intercept DRx writes when not having the hyper debug
2074 * state loaded, but in case we do for some reason we'll simply deal
2075 * with it.
2076 */
2077 switch (iGstReg)
2078 {
2079 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
2080 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
2081 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
2082 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
2083 default:
2084 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
2085 }
2086 }
2087#endif
2088 else
2089 {
2090 /*
2091 * No active debug state any more. In raw-mode this means we have to
2092 * make sure DR7 has everything disabled now, if we armed it already.
2093 * In ring-0 we might end up here when just single stepping.
2094 */
2095#if defined(IN_RC) || defined(IN_RING0)
2096 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
2097 {
2098# ifdef IN_RC
2099 ASMSetDR7(X86_DR7_INIT_VAL);
2100# endif
2101 if (pVCpu->cpum.s.Hyper.dr[0])
2102 ASMSetDR0(0);
2103 if (pVCpu->cpum.s.Hyper.dr[1])
2104 ASMSetDR1(0);
2105 if (pVCpu->cpum.s.Hyper.dr[2])
2106 ASMSetDR2(0);
2107 if (pVCpu->cpum.s.Hyper.dr[3])
2108 ASMSetDR3(0);
2109 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
2110 }
2111#endif
2112 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2113
2114 /* Clear all the registers. */
2115 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
2116 pVCpu->cpum.s.Hyper.dr[3] = 0;
2117 pVCpu->cpum.s.Hyper.dr[2] = 0;
2118 pVCpu->cpum.s.Hyper.dr[1] = 0;
2119 pVCpu->cpum.s.Hyper.dr[0] = 0;
2120
2121 }
2122 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2123 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2124 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2125 pVCpu->cpum.s.Hyper.dr[7]));
2126
2127 return VINF_SUCCESS;
2128}
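/* Illustrative example (not part of the original sources): if DBGF arms hardware
   breakpoint 0 and the guest has breakpoint 1 enabled in its DR7, the recalculated
   hyper set holds DBGF's DR0 and the guest's DR1, while the hyper DR7 carries
   L0/RW0/LEN0 from DBGF and L1/RW1/LEN1 from the guest -- DBGF wins per slot and
   the guest fills the remaining slots. */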
2129
2130
2131/**
2132 * Set the guest XCR0 register.
2133 *
2134 * Will load additional state if the FPU state is already loaded (in ring-0 &
2135 * raw-mode context).
2136 *
2137 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
2138 * value.
2139 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2140 * @param uNewValue The new value.
2141 * @thread EMT(pVCpu)
2142 */
2143VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
2144{
2145 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
2146 /* The X87 bit cannot be cleared. */
2147 && (uNewValue & XSAVE_C_X87)
2148 /* AVX requires SSE. */
2149 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
2150 /* AVX-512 requires YMM, SSE and all three of its components to be enabled. */
2151 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
2152 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
2153 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
2154 )
2155 {
2156 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
2157
2158 /* If more state components are enabled, we need to take care to load
2159 them if the FPU/SSE state is already loaded. We may otherwise leak
2160 host state to the guest. */
2161 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
2162 if (fNewComponents)
2163 {
2164#if defined(IN_RING0) || defined(IN_RC)
2165 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
2166 {
2167 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
2168 /* Adding more components. */
2169 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
2170 else
2171 {
2172 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
2173 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
2174 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
2175 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
2176 }
2177 }
2178#endif
2179 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
2180 }
2181 return VINF_SUCCESS;
2182 }
2183 return VERR_CPUM_RAISE_GP_0;
2184}
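/* Illustrative XCR0 values for the checks above (assuming the bits are within
   fXStateGuestMask):
       XSAVE_C_X87 | XSAVE_C_SSE                                -> accepted
       XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM                  -> accepted
       XSAVE_C_X87 | XSAVE_C_YMM                                -> rejected (AVX requires SSE)
       XSAVE_C_SSE                                              -> rejected (X87 cannot be cleared)
       XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK -> rejected (AVX-512 is all or nothing) */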
2185
2186
2187/**
2188 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2189 *
2190 * @returns true if NXE is enabled, otherwise false.
2191 * @param pVCpu The cross context virtual CPU structure.
2192 */
2193VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2194{
2195 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2196}
2197
2198
2199/**
2200 * Tests if the guest has the Page Size Extension enabled (PSE).
2201 *
2202 * @returns true if big pages are enabled (PSE or PAE), otherwise false.
2203 * @param pVCpu The cross context virtual CPU structure.
2204 */
2205VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2206{
2207 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2208 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2209}
2210
2211
2212/**
2213 * Tests if the guest has paging enabled (PG).
2214 *
2215 * @returns true if paging is enabled, otherwise false.
2216 * @param pVCpu The cross context virtual CPU structure.
2217 */
2218VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2219{
2220 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2221}
2222
2223
2224/**
2225 * Tests if the guest has ring-0 write protection enabled (WP).
2226 *
2227 * @returns true if CR0.WP is set, otherwise false.
2228 * @param pVCpu The cross context virtual CPU structure.
2229 */
2230VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2231{
2232 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2233}
2234
2235
2236/**
2237 * Tests if the guest is running in real mode or not.
2238 *
2239 * @returns true if in real mode, otherwise false.
2240 * @param pVCpu The cross context virtual CPU structure.
2241 */
2242VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2243{
2244 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2245}
2246
2247
2248/**
2249 * Tests if the guest is running in real or virtual 8086 mode.
2250 *
2251 * @returns @c true if it is, @c false if not.
2252 * @param pVCpu The cross context virtual CPU structure.
2253 */
2254VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2255{
2256 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2257 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2258}
2259
2260
2261/**
2262 * Tests if the guest is running in protected mode or not.
2263 *
2264 * @returns true if in protected mode, otherwise false.
2265 * @param pVCpu The cross context virtual CPU structure.
2266 */
2267VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2268{
2269 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2270}
2271
2272
2273/**
2274 * Tests if the guest is running in paged protected mode or not.
2275 *
2276 * @returns true if in paged protected mode, otherwise false.
2277 * @param pVCpu The cross context virtual CPU structure.
2278 */
2279VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2280{
2281 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2282}
2283
2284
2285/**
2286 * Tests if the guest is running in long mode or not.
2287 *
2288 * @returns true if in long mode, otherwise false.
2289 * @param pVCpu The cross context virtual CPU structure.
2290 */
2291VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2292{
2293 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2294}
2295
2296
2297/**
2298 * Tests if the guest is running in PAE mode or not.
2299 *
2300 * @returns true if in PAE mode, otherwise false.
2301 * @param pVCpu The cross context virtual CPU structure.
2302 */
2303VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2304{
2305 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
2306 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
2307 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2308 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2309 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2310}
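/* Example: CR4.PAE=1, CR0.PG=1 and EFER.LMA=0 is PAE paging and returns true;
   once the guest enters long mode (EFER.LMA=1) this returns false even though
   CR4.PAE remains set. */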
2311
2312
2313/**
2314 * Tests if the guest is running in 64-bit mode or not.
2315 *
2316 * @returns true if in 64-bit mode, otherwise false.
2317 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2318 */
2319VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2320{
2321 if (!CPUMIsGuestInLongMode(pVCpu))
2322 return false;
2323 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2324 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2325}
2326
2327
2328/**
2329 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2330 * registers.
2331 *
2332 * @returns true if in 64-bit mode, otherwise false.
2333 * @param pCtx Pointer to the current guest CPU context.
2334 */
2335VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2336{
2337 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2338}
2339
2340#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2341
2342/**
2343 * Tests whether the guest state has been transformed for raw-mode execution (ring compression).
2344 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2345 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2346 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2347 */
2348VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2349{
2350 return pVCpu->cpum.s.fRawEntered;
2351}
2352
2353/**
2354 * Transforms the guest CPU state to raw-ring mode.
2355 *
2356 * This function will change the CS and SS selectors with RPL=0 to RPL=1.
2357 *
2358 * @returns VBox status code. (recompiler failure)
2359 * @param pVCpu The cross context virtual CPU structure.
2360 * @see @ref pg_raw
2361 */
2362VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2363{
2364 PVM pVM = pVCpu->CTX_SUFF(pVM);
2365
2366 Assert(!pVCpu->cpum.s.fRawEntered);
2367 Assert(!pVCpu->cpum.s.fRemEntered);
2368 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2369
2370 /*
2371 * Are we in Ring-0?
2372 */
2373 if ( pCtx->ss.Sel
2374 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2375 && !pCtx->eflags.Bits.u1VM)
2376 {
2377 /*
2378 * Enter execution mode.
2379 */
2380 PATMRawEnter(pVM, pCtx);
2381
2382 /*
2383 * Set CPL to Ring-1.
2384 */
2385 pCtx->ss.Sel |= 1;
2386 if ( pCtx->cs.Sel
2387 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2388 pCtx->cs.Sel |= 1;
2389 }
2390 else
2391 {
2392# ifdef VBOX_WITH_RAW_RING1
2393 if ( EMIsRawRing1Enabled(pVM)
2394 && !pCtx->eflags.Bits.u1VM
2395 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2396 {
2397 /* Set CPL to Ring-2. */
2398 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2399 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2400 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2401 }
2402# else
2403 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2404 ("ring-1 code not supported\n"));
2405# endif
2406 /*
2407 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2408 */
2409 PATMRawEnter(pVM, pCtx);
2410 }
2411
2412 /*
2413 * Assert sanity.
2414 */
2415 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2416 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2417 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2418 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
2419
2420 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2421
2422 pVCpu->cpum.s.fRawEntered = true;
2423 return VINF_SUCCESS;
2424}
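/* Example of the ring compression performed above: a guest kernel running with
   SS=0x0010 and CS=0x0008 (RPL=0) executes raw-mode with SS=0x0011 and CS=0x0009
   (RPL=1); CPUMRawLeave below reverses the transformation. */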
2425
2426
2427/**
2428 * Transforms the guest CPU state from raw-ring mode to correct values.
2429 *
2430 * This function will change any selector registers with RPL=1 back to RPL=0.
2431 *
2432 * @returns Adjusted rc.
2433 * @param pVCpu The cross context virtual CPU structure.
2434 * @param rc Raw mode return code
2435 * @see @ref pg_raw
2436 */
2437VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2438{
2439 PVM pVM = pVCpu->CTX_SUFF(pVM);
2440
2441 /*
2442 * Don't leave if we've already left (in RC).
2443 */
2444 Assert(!pVCpu->cpum.s.fRemEntered);
2445 if (!pVCpu->cpum.s.fRawEntered)
2446 return rc;
2447 pVCpu->cpum.s.fRawEntered = false;
2448
2449 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2450 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2451 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2452 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2453
2454 /*
2455 * Are we executing in raw ring-1?
2456 */
2457 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2458 && !pCtx->eflags.Bits.u1VM)
2459 {
2460 /*
2461 * Leave execution mode.
2462 */
2463 PATMRawLeave(pVM, pCtx, rc);
2464 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2465 /** @todo See what happens if we remove this. */
2466 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2467 pCtx->ds.Sel &= ~X86_SEL_RPL;
2468 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2469 pCtx->es.Sel &= ~X86_SEL_RPL;
2470 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2471 pCtx->fs.Sel &= ~X86_SEL_RPL;
2472 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2473 pCtx->gs.Sel &= ~X86_SEL_RPL;
2474
2475 /*
2476 * Ring-1 selector => Ring-0.
2477 */
2478 pCtx->ss.Sel &= ~X86_SEL_RPL;
2479 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2480 pCtx->cs.Sel &= ~X86_SEL_RPL;
2481 }
2482 else
2483 {
2484 /*
2485 * PATM is taking care of the IOPL and IF flags for us.
2486 */
2487 PATMRawLeave(pVM, pCtx, rc);
2488 if (!pCtx->eflags.Bits.u1VM)
2489 {
2490# ifdef VBOX_WITH_RAW_RING1
2491 if ( EMIsRawRing1Enabled(pVM)
2492 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2493 {
2494 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2495 /** @todo See what happens if we remove this. */
2496 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2497 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2498 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2499 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2500 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2501 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2502 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2503 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2504
2505 /*
2506 * Ring-2 selector => Ring-1.
2507 */
2508 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2509 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2510 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2511 }
2512 else
2513 {
2514# endif
2515 /** @todo See what happens if we remove this. */
2516 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2517 pCtx->ds.Sel &= ~X86_SEL_RPL;
2518 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2519 pCtx->es.Sel &= ~X86_SEL_RPL;
2520 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2521 pCtx->fs.Sel &= ~X86_SEL_RPL;
2522 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2523 pCtx->gs.Sel &= ~X86_SEL_RPL;
2524# ifdef VBOX_WITH_RAW_RING1
2525 }
2526# endif
2527 }
2528 }
2529
2530 return rc;
2531}
2532
2533#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2534
2535/**
2536 * Updates the EFLAGS while we're in raw-mode.
2537 *
2538 * @param pVCpu The cross context virtual CPU structure.
2539 * @param fEfl The new EFLAGS value.
2540 */
2541VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2542{
2543#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2544 if (pVCpu->cpum.s.fRawEntered)
2545 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2546 else
2547#endif
2548 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2549}
2550
2551
2552/**
2553 * Gets the EFLAGS while we're in raw-mode.
2554 *
2555 * @returns The eflags.
2556 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2557 */
2558VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2559{
2560#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2561 if (pVCpu->cpum.s.fRawEntered)
2562 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2563#endif
2564 return pVCpu->cpum.s.Guest.eflags.u32;
2565}
2566
2567
2568/**
2569 * Sets the specified changed flags (CPUM_CHANGED_*).
2570 *
2571 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2572 * @param fChangedAdd The changed flags to add.
2573 */
2574VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2575{
2576 pVCpu->cpum.s.fChanged |= fChangedAdd;
2577}
2578
2579
2580/**
2581 * Checks if the host CPU supports the XSAVE and XRSTOR instructions.
2582 *
2583 * @returns true if supported.
2584 * @returns false if not supported.
2585 * @param pVM The cross context VM structure.
2586 */
2587VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2588{
2589 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2590}
2591
2592
2593/**
2594 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2595 * @returns true if used.
2596 * @returns false if not used.
2597 * @param pVM The cross context VM structure.
2598 */
2599VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2600{
2601 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2602}
2603
2604
2605/**
2606 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2607 * @returns true if used.
2608 * @returns false if not used.
2609 * @param pVM The cross context VM structure.
2610 */
2611VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2612{
2613 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2614}
2615
2616#ifdef IN_RC
2617
2618/**
2619 * Lazily syncs in the FPU/XMM state.
2620 *
2621 * @returns VBox status code.
2622 * @param pVCpu The cross context virtual CPU structure.
2623 */
2624VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2625{
2626 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2627}
2628
2629#endif /* IN_RC */
2630
2631/**
2632 * Checks if we activated the FPU/XMM state of the guest OS.
2633 *
2634 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
2635 * time we'll be executing guest code, so it may return true for 64-on-32 when
2636 * we still haven't actually loaded the FPU state, just scheduled it to be
2637 * loaded the next time we go thru the world switcher (CPUM_SYNC_FPU_STATE).
2638 *
2639 * @returns true / false.
2640 * @param pVCpu The cross context virtual CPU structure.
2641 */
2642VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2643{
2644 return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
2645}
2646
2647
2648/**
2649 * Checks if we've really loaded the FPU/XMM state of the guest OS.
2650 *
2651 * @returns true / false.
2652 * @param pVCpu The cross context virtual CPU structure.
2653 */
2654VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
2655{
2656 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
2657}
2658
2659
2660/**
2661 * Checks if we saved the FPU/XMM state of the host OS.
2662 *
2663 * @returns true / false.
2664 * @param pVCpu The cross context virtual CPU structure.
2665 */
2666VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
2667{
2668 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
2669}
2670
2671
2672/**
2673 * Checks if the guest debug state is active.
2674 *
2675 * @returns boolean
2676 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2677 */
2678VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2679{
2680 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2681}
2682
2683
2684/**
2685 * Checks if the guest debug state is to be made active during the world-switch
2686 * (currently only used for the 32->64 switcher case).
2687 *
2688 * @returns boolean
2689 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2690 */
2691VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2692{
2693 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2694}
2695
2696
2697/**
2698 * Checks if the hyper debug state is active.
2699 *
2700 * @returns boolean
2701 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2702 */
2703VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2704{
2705 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2706}
2707
2708
2709/**
2710 * Checks if the hyper debug state is to be made active during the world-switch
2711 * (currently only used for the 32->64 switcher case).
2712 *
2713 * @returns boolean
2714 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2715 */
2716VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2717{
2718 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2719}
2720
2721
2722/**
2723 * Mark the guest's debug state as inactive.
2724 *
2725 * @remarks Only asserts that no debug state is currently in use.
2726 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2727 * @todo This API doesn't make sense any more.
2728 */
2729VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2730{
2731 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2732 NOREF(pVCpu);
2733}
2734
2735
2736/**
2737 * Get the current privilege level of the guest.
2738 *
2739 * @returns CPL
2740 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2741 */
2742VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2743{
2744 /*
2745 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2746 *
2747 * Note! We used to check CS.DPL here, assuming it was always equal to
2748 * CPL even if a conforming segment was loaded. But this turned out to
2749 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2750 * during install after a far call to ring 2 with VT-x. Then on newer
2751 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2752 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2753 *
2754 * So, forget CS.DPL, always use SS.DPL.
2755 *
2756 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2757 * isn't necessarily equal if the segment is conforming.
2758 * See section 4.11.1 in the AMD manual.
2759 *
2760 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2761 * right after real->prot mode switch and when in V8086 mode? That
2762 * section says the RPL specified in a direct transfer (call, jmp,
2763 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2764 * it would be impossible for an exception handler or the iret
2765 * instruction to figure out whether SS:ESP are part of the frame
2766 * or not. A VBox or qemu bug must've led to this misconception.
2767 *
2768 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2769 * selector into SS with an RPL other than the CPL when CPL != 3 and
2770 * we're in 64-bit mode. The intel dev box doesn't allow this, on
2771 * we're in 64-bit mode. The intel dev box doesn't allow this, only
2772 * RPL = CPL. Weird.
2773 uint32_t uCpl;
2774 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2775 {
2776 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2777 {
2778 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2779 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2780 else
2781 {
2782 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2783#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2784# ifdef VBOX_WITH_RAW_RING1
2785 if (pVCpu->cpum.s.fRawEntered)
2786 {
2787 if ( uCpl == 2
2788 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2789 uCpl = 1;
2790 else if (uCpl == 1)
2791 uCpl = 0;
2792 }
2793 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2794# else
2795 if (uCpl == 1)
2796 uCpl = 0;
2797# endif
2798#endif
2799 }
2800 }
2801 else
2802 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2803 }
2804 else
2805 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2806 return uCpl;
2807}
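/* Summary of the resolution above: real mode -> 0, V8086 -> 3, protected mode with
   valid hidden SS -> SS.DPL, otherwise the SS selector RPL (adjusted for raw-mode
   ring compression, so RPL 1/2 read back as CPL 0/1). */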
2808
2809
2810/**
2811 * Gets the current guest CPU mode.
2812 *
2813 * If paging mode is what you need, check out PGMGetGuestMode().
2814 *
2815 * @returns The CPU mode.
2816 * @param pVCpu The cross context virtual CPU structure.
2817 */
2818VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2819{
2820 CPUMMODE enmMode;
2821 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2822 enmMode = CPUMMODE_REAL;
2823 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2824 enmMode = CPUMMODE_PROTECTED;
2825 else
2826 enmMode = CPUMMODE_LONG;
2827
2828 return enmMode;
2829}
2830
2831
2832/**
2833 * Figures out whether the guest CPU is currently executing 16-, 32- or 64-bit code.
2834 *
2835 * @returns 16, 32 or 64.
2836 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2837 */
2838VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2839{
2840 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2841 return 16;
2842
2843 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2844 {
2845 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2846 return 16;
2847 }
2848
2849 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2850 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2851 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2852 return 64;
2853
2854 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2855 return 32;
2856
2857 return 16;
2858}
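/* Example: a long-mode code segment with CS.L=1 yields 64; a compatibility-mode
   segment (CS.L=0, CS.D=1) yields 32; real mode, V8086 and 16-bit protected-mode
   segments yield 16. */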
2859
2860
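/**
 * Figures out the disassembler mode corresponding to the current guest code bitness.
 *
 * @returns DISCPUMODE_16BIT, DISCPUMODE_32BIT or DISCPUMODE_64BIT.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */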
2861VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2862{
2863 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2864 return DISCPUMODE_16BIT;
2865
2866 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2867 {
2868 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2869 return DISCPUMODE_16BIT;
2870 }
2871
2872 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2873 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2874 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2875 return DISCPUMODE_64BIT;
2876
2877 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2878 return DISCPUMODE_32BIT;
2879
2880 return DISCPUMODE_16BIT;
2881}
2882