VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@62209

Last change on this file since 62209 was 61776, checked in by vboxsync, 9 years ago

CPUM,APIC: Per-CPU APIC CPUID feature bit and MSR_IA32_APICBASE GP mask adjustments.

  • Changed the PDMAPICHLPR3::pfnChangeFeature to pfnSetFeatureLevel, removing the RC and R0 versions.
  • Only use pfnSetFeatureLevel from the APIC constructor to communicate to CPUM the max APIC feature level, not to globally flip CPUID[1].EDX[9].
  • Renamed APIC enmOriginalMode to enmMaxMode, changing its type and that of the corresponding config values to PDMAPICMODE. This makes the above simpler and eliminates two conversion functions. It also makes APICMODE private to the APIC again.
  • Introduced CPUMSetGuestCpuIdPerCpuApicFeature for the per-CPU APIC feature bit management (a usage sketch follows below).
  • Introduced CPUMCPUIDLEAF_F_CONTAINS_APIC, which works the same way as CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE and CPUMCPUIDLEAF_F_CONTAINS_APIC_ID. Updated the existing CPU profiles with this.
  • Made the patch manager helper function actually handle CPUMCPUIDLEAF_F_CONTAINS_APIC and CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE (the latter previously relied on CPUMSetGuestCpuIdFeature/CPUMClearGuestCpuIdFeature from CPUMSetGuestCR4).
  • Pushed CPUMSetGuestCpuIdFeature, CPUMGetGuestCpuIdFeature and CPUMClearGuestCpuIdFeature down to ring-3 only (now CPUMR3*). The latter two functions are deprecated.
  • Added a call to CPUMSetGuestCpuIdPerCpuApicFeature from the saved-state load function in case the APIC was disabled by the guest at the time of saving.
  • CPUMSetGuestCpuIdFeature ensures we have an MSR_IA32_APICBASE register when enabling the APIC.
  • CPUMSetGuestCpuIdFeature adjusts the MSR_IA32_APICBASE GP mask when enabling x2APIC so that setting MSR_IA32_APICBASE_EXTD does not trap.
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 71.2 KB
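To illustrate the per-CPU APIC feature handling described in the change list above, here is a minimal, hypothetical usage sketch of CPUMSetGuestCpuIdPerCpuApicFeature from an APIC base MSR write path. The helper apicSketchSetBaseMsr and its surrounding logic are illustrative only (this is not the actual APIC code), and the MSR_IA32_APICBASE_EN constant name is assumed from the VBox headers.

static void apicSketchSetBaseMsr(PVMCPU pVCpu, uint64_t uNewApicBaseMsr)
{
    /* Mirror APICBASE.EN into the per-CPU CPUID APIC feature bit so that
       CPUID[1].EDX[9] and CPUID[0x80000001].EDX[9] track this VCPU only. */
    bool const fEnabled    = RT_BOOL(uNewApicBaseMsr & MSR_IA32_APICBASE_EN);
    bool const fOldVisible = CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, fEnabled);
    NOREF(fOldVisible);

    /* The real code additionally widens the MSR_IA32_APICBASE GP mask when
       x2APIC is permitted, so that setting MSR_IA32_APICBASE_EXTD does not #GP. */
}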
Line 
1/* $Id: CPUMAllRegs.cpp 61776 2016-06-20 23:25:06Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG)
49# pragma optimize("y", off)
50#endif
51
52AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
53AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
54
55
56/*********************************************************************************************************************************
57* Defined Constants And Macros *
58*********************************************************************************************************************************/
59/**
60 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
61 *
62 * @returns Pointer to the Virtual CPU.
63 * @param a_pGuestCtx Pointer to the guest context.
64 */
65#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
66
67/**
68 * Lazily loads the hidden parts of a selector register when using raw-mode.
69 */
70#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
71# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
72 do \
73 { \
74 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
75 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
76 } while (0)
77#else
78# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
79 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
80#endif
81
82
83
84#ifdef VBOX_WITH_RAW_MODE_NOT_R0
85
86/**
87 * Does the lazy hidden selector register loading.
88 *
89 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
90 * @param pSReg The selector register to lazily load hidden parts of.
91 */
92static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
93{
94 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
95 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
96 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
97
98 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
99 {
100 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
101 pSReg->Attr.u = 0;
102 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
103 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
104 pSReg->Attr.n.u2Dpl = 3;
105 pSReg->Attr.n.u1Present = 1;
106 pSReg->u32Limit = 0x0000ffff;
107 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
108 pSReg->ValidSel = pSReg->Sel;
109 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
110 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
111 }
112 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
113 {
114 /* Real mode - leave the limit and flags alone here, at least for now. */
115 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
116 pSReg->ValidSel = pSReg->Sel;
117 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
118 }
119 else
120 {
121 /* Protected mode - get it from the selector descriptor tables. */
122 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
123 {
124 Assert(!CPUMIsGuestInLongMode(pVCpu));
125 pSReg->Sel = 0;
126 pSReg->u64Base = 0;
127 pSReg->u32Limit = 0;
128 pSReg->Attr.u = 0;
129 pSReg->ValidSel = 0;
130 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
131 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
132 }
133 else
134 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
135 }
136}
137
138
139/**
140 * Makes sure the hidden CS and SS selector registers are valid, loading them if
141 * necessary.
142 *
143 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
144 */
145VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
146{
147 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
148 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
149}
150
151
152/**
153 * Loads the hidden parts of a selector register.
154 *
155 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
156 */
157VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
158{
159 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
160}
161
162#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
163
164
165/**
166 * Obsolete.
167 *
168 * We don't support nested hypervisor context interrupts or traps. Life is much
169 * simpler when we don't. It's also slightly faster at times.
170 *
171 * @param pVCpu The cross context virtual CPU structure.
172 */
173VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
174{
175 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
176}
177
178
179/**
180 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
181 *
182 * @param pVCpu The cross context virtual CPU structure.
183 */
184VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
185{
186 return &pVCpu->cpum.s.Hyper;
187}
188
189
190VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
191{
192 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
193 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
194}
195
196
197VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
198{
199 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
200 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
201}
202
203
204VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
205{
206 pVCpu->cpum.s.Hyper.cr3 = cr3;
207
208#ifdef IN_RC
209 /* Update the current CR3. */
210 ASMSetCR3(cr3);
211#endif
212}
213
214VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
215{
216 return pVCpu->cpum.s.Hyper.cr3;
217}
218
219
220VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
221{
222 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
223}
224
225
226VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
227{
228 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
229}
230
231
232VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
233{
234 pVCpu->cpum.s.Hyper.es.Sel = SelES;
235}
236
237
238VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
239{
240 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
241}
242
243
244VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
245{
246 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
247}
248
249
250VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
251{
252 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
253}
254
255
256VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
257{
258 pVCpu->cpum.s.Hyper.esp = u32ESP;
259}
260
261
262VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
263{
264 pVCpu->cpum.s.Hyper.edx = u32EDX;
265}
266
267
268VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
269{
270 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
271 return VINF_SUCCESS;
272}
273
274
275VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
276{
277 pVCpu->cpum.s.Hyper.eip = u32EIP;
278}
279
280
281/**
282 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
283 * EFLAGS and EIP prior to resuming guest execution.
284 *
285 * All general registers not given as a parameter will be set to 0. The EFLAGS
286 * register will be set to sane values for C/C++ code execution with interrupts
287 * disabled and IOPL 0.
288 *
289 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
290 * @param u32EIP The EIP value.
291 * @param u32ESP The ESP value.
292 * @param u32EAX The EAX value.
293 * @param u32EDX The EDX value.
294 */
295VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
296{
297 pVCpu->cpum.s.Hyper.eip = u32EIP;
298 pVCpu->cpum.s.Hyper.esp = u32ESP;
299 pVCpu->cpum.s.Hyper.eax = u32EAX;
300 pVCpu->cpum.s.Hyper.edx = u32EDX;
301 pVCpu->cpum.s.Hyper.ecx = 0;
302 pVCpu->cpum.s.Hyper.ebx = 0;
303 pVCpu->cpum.s.Hyper.ebp = 0;
304 pVCpu->cpum.s.Hyper.esi = 0;
305 pVCpu->cpum.s.Hyper.edi = 0;
306 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
307}
308
309
310VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
311{
312 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
313}
314
315
316VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
317{
318 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
319}
320
321
322/** @def MAYBE_LOAD_DRx
323 * Macro for updating DRx values in raw-mode and ring-0 contexts.
324 */
325#ifdef IN_RING0
326# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
327# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
328 do { \
329 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
330 a_fnLoad(a_uValue); \
331 else \
332 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
333 } while (0)
334# else
335# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
336 do { \
337 a_fnLoad(a_uValue); \
338 } while (0)
339# endif
340
341#elif defined(IN_RC)
342# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
343 do { \
344 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
345 { a_fnLoad(a_uValue); } \
346 } while (0)
347
348#else
349# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
350#endif
351
352VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
353{
354 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
355 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
356}
357
358
359VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
360{
361 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
362 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
363}
364
365
366VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
367{
368 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
369 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
370}
371
372
373VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
374{
375 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
376 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
377}
378
379
380VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
381{
382 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
383}
384
385
386VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
387{
388 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
389#ifdef IN_RC
390 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
391#endif
392}
393
394
395VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
396{
397 return pVCpu->cpum.s.Hyper.cs.Sel;
398}
399
400
401VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
402{
403 return pVCpu->cpum.s.Hyper.ds.Sel;
404}
405
406
407VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
408{
409 return pVCpu->cpum.s.Hyper.es.Sel;
410}
411
412
413VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
414{
415 return pVCpu->cpum.s.Hyper.fs.Sel;
416}
417
418
419VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
420{
421 return pVCpu->cpum.s.Hyper.gs.Sel;
422}
423
424
425VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
426{
427 return pVCpu->cpum.s.Hyper.ss.Sel;
428}
429
430
431VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
432{
433 return pVCpu->cpum.s.Hyper.eax;
434}
435
436
437VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
438{
439 return pVCpu->cpum.s.Hyper.ebx;
440}
441
442
443VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
444{
445 return pVCpu->cpum.s.Hyper.ecx;
446}
447
448
449VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
450{
451 return pVCpu->cpum.s.Hyper.edx;
452}
453
454
455VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
456{
457 return pVCpu->cpum.s.Hyper.esi;
458}
459
460
461VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
462{
463 return pVCpu->cpum.s.Hyper.edi;
464}
465
466
467VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
468{
469 return pVCpu->cpum.s.Hyper.ebp;
470}
471
472
473VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
474{
475 return pVCpu->cpum.s.Hyper.esp;
476}
477
478
479VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
480{
481 return pVCpu->cpum.s.Hyper.eflags.u32;
482}
483
484
485VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
486{
487 return pVCpu->cpum.s.Hyper.eip;
488}
489
490
491VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
492{
493 return pVCpu->cpum.s.Hyper.rip;
494}
495
496
497VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
498{
499 if (pcbLimit)
500 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
501 return pVCpu->cpum.s.Hyper.idtr.pIdt;
502}
503
504
505VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
506{
507 if (pcbLimit)
508 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
509 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
510}
511
512
513VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
514{
515 return pVCpu->cpum.s.Hyper.ldtr.Sel;
516}
517
518
519VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
520{
521 return pVCpu->cpum.s.Hyper.dr[0];
522}
523
524
525VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
526{
527 return pVCpu->cpum.s.Hyper.dr[1];
528}
529
530
531VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
532{
533 return pVCpu->cpum.s.Hyper.dr[2];
534}
535
536
537VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
538{
539 return pVCpu->cpum.s.Hyper.dr[3];
540}
541
542
543VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
544{
545 return pVCpu->cpum.s.Hyper.dr[6];
546}
547
548
549VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
550{
551 return pVCpu->cpum.s.Hyper.dr[7];
552}
553
554
555/**
556 * Gets the pointer to the internal CPUMCTXCORE structure.
557 * This is only for reading in order to save a few calls.
558 *
559 * @param pVCpu The cross context virtual CPU structure.
560 */
561VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
562{
563 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
564}
565
566
567/**
568 * Queries the pointer to the internal CPUMCTX structure.
569 *
570 * @returns The CPUMCTX pointer.
571 * @param pVCpu The cross context virtual CPU structure.
572 */
573VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
574{
575 return &pVCpu->cpum.s.Guest;
576}
577
578VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
579{
580#ifdef VBOX_WITH_RAW_MODE_NOT_R0
581 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
582 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
583#endif
584 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
585 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
586 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
587 return VINF_SUCCESS; /* formality, consider it void. */
588}
589
590VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
591{
592#ifdef VBOX_WITH_RAW_MODE_NOT_R0
593 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
594 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
595#endif
596 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
597 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
598 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
599 return VINF_SUCCESS; /* formality, consider it void. */
600}
601
602VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
603{
604#ifdef VBOX_WITH_RAW_MODE_NOT_R0
605 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
606 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
607#endif
608 pVCpu->cpum.s.Guest.tr.Sel = tr;
609 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
610 return VINF_SUCCESS; /* formality, consider it void. */
611}
612
613VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
614{
615#ifdef VBOX_WITH_RAW_MODE_NOT_R0
616 if ( ( ldtr != 0
617 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
618 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
619 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
620#endif
621 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
622 /* The caller will set more hidden bits if it has them. */
623 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
624 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
625 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
626 return VINF_SUCCESS; /* formality, consider it void. */
627}
628
629
630/**
631 * Set the guest CR0.
632 *
633 * When called in GC, the hyper CR0 may be updated if that is
634 * required. The caller only has to take special action if AM,
635 * WP, PG or PE changes.
636 *
637 * @returns VINF_SUCCESS (consider it void).
638 * @param pVCpu The cross context virtual CPU structure.
639 * @param cr0 The new CR0 value.
640 */
641VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
642{
643#ifdef IN_RC
644 /*
645 * Check if we need to change hypervisor CR0 because
646 * of math stuff.
647 */
648 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
649 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
650 {
651 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
652 {
653 /*
654 * We haven't loaded the guest FPU state yet, so TS and MT are both set
655 * and EM should be reflecting the guest EM (it always does this).
656 */
657 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
658 {
659 uint32_t HyperCR0 = ASMGetCR0();
660 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
661 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
662 HyperCR0 &= ~X86_CR0_EM;
663 HyperCR0 |= cr0 & X86_CR0_EM;
664 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
665 ASMSetCR0(HyperCR0);
666 }
667# ifdef VBOX_STRICT
668 else
669 {
670 uint32_t HyperCR0 = ASMGetCR0();
671 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
672 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
673 }
674# endif
675 }
676 else
677 {
678 /*
679 * Already loaded the guest FPU state, so we're just mirroring
680 * the guest flags.
681 */
682 uint32_t HyperCR0 = ASMGetCR0();
683 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
684 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
685 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
686 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
687 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
688 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
689 ASMSetCR0(HyperCR0);
690 }
691 }
692#endif /* IN_RC */
693
694 /*
695 * Check for changes causing TLB flushes (for REM).
696 * The caller is responsible for calling PGM when appropriate.
697 */
698 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
699 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
700 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
701 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
702
703 /*
704 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
705 */
706 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
707 PGMCr0WpEnabled(pVCpu);
708
709 /* The ET flag is settable on a 386 and hardwired on 486+. */
710 if ( !(cr0 & X86_CR0_ET)
711 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
712 cr0 |= X86_CR0_ET;
713
714 pVCpu->cpum.s.Guest.cr0 = cr0;
715 return VINF_SUCCESS;
716}
717
718
719VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
720{
721 pVCpu->cpum.s.Guest.cr2 = cr2;
722 return VINF_SUCCESS;
723}
724
725
726VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
727{
728 pVCpu->cpum.s.Guest.cr3 = cr3;
729 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
735{
736 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
737
738 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
739 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
740 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
741
742 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
743 pVCpu->cpum.s.Guest.cr4 = cr4;
744 return VINF_SUCCESS;
745}
746
747
748VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
749{
750 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
751 return VINF_SUCCESS;
752}
753
754
755VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
756{
757 pVCpu->cpum.s.Guest.eip = eip;
758 return VINF_SUCCESS;
759}
760
761
762VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
763{
764 pVCpu->cpum.s.Guest.eax = eax;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
770{
771 pVCpu->cpum.s.Guest.ebx = ebx;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
777{
778 pVCpu->cpum.s.Guest.ecx = ecx;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
784{
785 pVCpu->cpum.s.Guest.edx = edx;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
791{
792 pVCpu->cpum.s.Guest.esp = esp;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
798{
799 pVCpu->cpum.s.Guest.ebp = ebp;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
805{
806 pVCpu->cpum.s.Guest.esi = esi;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
812{
813 pVCpu->cpum.s.Guest.edi = edi;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
819{
820 pVCpu->cpum.s.Guest.ss.Sel = ss;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
826{
827 pVCpu->cpum.s.Guest.cs.Sel = cs;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
833{
834 pVCpu->cpum.s.Guest.ds.Sel = ds;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
840{
841 pVCpu->cpum.s.Guest.es.Sel = es;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
847{
848 pVCpu->cpum.s.Guest.fs.Sel = fs;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
854{
855 pVCpu->cpum.s.Guest.gs.Sel = gs;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
861{
862 pVCpu->cpum.s.Guest.msrEFER = val;
863}
864
865
866VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
867{
868 if (pcbLimit)
869 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
870 return pVCpu->cpum.s.Guest.idtr.pIdt;
871}
872
873
874VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
875{
876 if (pHidden)
877 *pHidden = pVCpu->cpum.s.Guest.tr;
878 return pVCpu->cpum.s.Guest.tr.Sel;
879}
880
881
882VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
883{
884 return pVCpu->cpum.s.Guest.cs.Sel;
885}
886
887
888VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
889{
890 return pVCpu->cpum.s.Guest.ds.Sel;
891}
892
893
894VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
895{
896 return pVCpu->cpum.s.Guest.es.Sel;
897}
898
899
900VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
901{
902 return pVCpu->cpum.s.Guest.fs.Sel;
903}
904
905
906VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
907{
908 return pVCpu->cpum.s.Guest.gs.Sel;
909}
910
911
912VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
913{
914 return pVCpu->cpum.s.Guest.ss.Sel;
915}
916
917
918VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
919{
920 return pVCpu->cpum.s.Guest.ldtr.Sel;
921}
922
923
924VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
925{
926 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
927 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
928 return pVCpu->cpum.s.Guest.ldtr.Sel;
929}
930
931
932VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
933{
934 return pVCpu->cpum.s.Guest.cr0;
935}
936
937
938VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
939{
940 return pVCpu->cpum.s.Guest.cr2;
941}
942
943
944VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
945{
946 return pVCpu->cpum.s.Guest.cr3;
947}
948
949
950VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
951{
952 return pVCpu->cpum.s.Guest.cr4;
953}
954
955
956VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
957{
958 uint64_t u64;
959 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
960 if (RT_FAILURE(rc))
961 u64 = 0;
962 return u64;
963}
964
965
966VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
967{
968 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
969}
970
971
972VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
973{
974 return pVCpu->cpum.s.Guest.eip;
975}
976
977
978VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
979{
980 return pVCpu->cpum.s.Guest.rip;
981}
982
983
984VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
985{
986 return pVCpu->cpum.s.Guest.eax;
987}
988
989
990VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
991{
992 return pVCpu->cpum.s.Guest.ebx;
993}
994
995
996VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
997{
998 return pVCpu->cpum.s.Guest.ecx;
999}
1000
1001
1002VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1003{
1004 return pVCpu->cpum.s.Guest.edx;
1005}
1006
1007
1008VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1009{
1010 return pVCpu->cpum.s.Guest.esi;
1011}
1012
1013
1014VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1015{
1016 return pVCpu->cpum.s.Guest.edi;
1017}
1018
1019
1020VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1021{
1022 return pVCpu->cpum.s.Guest.esp;
1023}
1024
1025
1026VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1027{
1028 return pVCpu->cpum.s.Guest.ebp;
1029}
1030
1031
1032VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1033{
1034 return pVCpu->cpum.s.Guest.eflags.u32;
1035}
1036
1037
1038VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1039{
1040 switch (iReg)
1041 {
1042 case DISCREG_CR0:
1043 *pValue = pVCpu->cpum.s.Guest.cr0;
1044 break;
1045
1046 case DISCREG_CR2:
1047 *pValue = pVCpu->cpum.s.Guest.cr2;
1048 break;
1049
1050 case DISCREG_CR3:
1051 *pValue = pVCpu->cpum.s.Guest.cr3;
1052 break;
1053
1054 case DISCREG_CR4:
1055 *pValue = pVCpu->cpum.s.Guest.cr4;
1056 break;
1057
1058 case DISCREG_CR8:
1059 {
1060 uint8_t u8Tpr;
1061 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1062 if (RT_FAILURE(rc))
1063 {
1064 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1065 *pValue = 0;
1066 return rc;
1067 }
1068 *pValue = u8Tpr >> 4; /* bits 7-4 contain the task priority that goes into cr8, bits 3-0 the sub-priority. */
1069 break;
1070 }
1071
1072 default:
1073 return VERR_INVALID_PARAMETER;
1074 }
1075 return VINF_SUCCESS;
1076}
1077
1078
1079VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1080{
1081 return pVCpu->cpum.s.Guest.dr[0];
1082}
1083
1084
1085VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1086{
1087 return pVCpu->cpum.s.Guest.dr[1];
1088}
1089
1090
1091VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1092{
1093 return pVCpu->cpum.s.Guest.dr[2];
1094}
1095
1096
1097VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1098{
1099 return pVCpu->cpum.s.Guest.dr[3];
1100}
1101
1102
1103VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1104{
1105 return pVCpu->cpum.s.Guest.dr[6];
1106}
1107
1108
1109VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1110{
1111 return pVCpu->cpum.s.Guest.dr[7];
1112}
1113
1114
1115VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1116{
1117 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1118 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1119 if (iReg == 4 || iReg == 5)
1120 iReg += 2;
1121 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1122 return VINF_SUCCESS;
1123}
1124
1125
1126VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1127{
1128 return pVCpu->cpum.s.Guest.msrEFER;
1129}
1130
1131
1132/**
1133 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1134 *
1135 * @returns Pointer to the leaf if found, NULL if not.
1136 *
1137 * @param pVM The cross context VM structure.
1138 * @param uLeaf The leaf to get.
1139 */
1140PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1141{
1142 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1143 if (iEnd)
1144 {
1145 unsigned iStart = 0;
1146 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1147 for (;;)
1148 {
1149 unsigned i = iStart + (iEnd - iStart) / 2U;
1150 if (uLeaf < paLeaves[i].uLeaf)
1151 {
1152 if (i <= iStart)
1153 return NULL;
1154 iEnd = i;
1155 }
1156 else if (uLeaf > paLeaves[i].uLeaf)
1157 {
1158 i += 1;
1159 if (i >= iEnd)
1160 return NULL;
1161 iStart = i;
1162 }
1163 else
1164 {
1165 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1166 return &paLeaves[i];
1167
1168 /* This shouldn't normally happen. But in case it does due
1169 to user configuration overrides or something, just return the
1170 first sub-leaf. */
1171 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1172 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1173 while ( paLeaves[i].uSubLeaf != 0
1174 && i > 0
1175 && uLeaf == paLeaves[i - 1].uLeaf)
1176 i--;
1177 return &paLeaves[i];
1178 }
1179 }
1180 }
1181
1182 return NULL;
1183}
1184
1185
1186/**
1187 * Looks up a CPUID leaf in the CPUID leaf array.
1188 *
1189 * @returns Pointer to the leaf if found, NULL if not.
1190 *
1191 * @param pVM The cross context VM structure.
1192 * @param uLeaf The leaf to get.
1193 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1194 * isn't.
1195 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1196 */
1197PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1198{
1199 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1200 if (iEnd)
1201 {
1202 unsigned iStart = 0;
1203 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1204 for (;;)
1205 {
1206 unsigned i = iStart + (iEnd - iStart) / 2U;
1207 if (uLeaf < paLeaves[i].uLeaf)
1208 {
1209 if (i <= iStart)
1210 return NULL;
1211 iEnd = i;
1212 }
1213 else if (uLeaf > paLeaves[i].uLeaf)
1214 {
1215 i += 1;
1216 if (i >= iEnd)
1217 return NULL;
1218 iStart = i;
1219 }
1220 else
1221 {
1222 uSubLeaf &= paLeaves[i].fSubLeafMask;
1223 if (uSubLeaf == paLeaves[i].uSubLeaf)
1224 *pfExactSubLeafHit = true;
1225 else
1226 {
1227 /* Find the right subleaf. We return the last one before
1228 uSubLeaf if we don't find an exact match. */
1229 if (uSubLeaf < paLeaves[i].uSubLeaf)
1230 while ( i > 0
1231 && uLeaf == paLeaves[i - 1].uLeaf
1232 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1233 i--;
1234 else
1235 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1236 && uLeaf == paLeaves[i + 1].uLeaf
1237 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1238 i++;
1239 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1240 }
1241 return &paLeaves[i];
1242 }
1243 }
1244 }
1245
1246 *pfExactSubLeafHit = false;
1247 return NULL;
1248}
1249
1250
1251/**
1252 * Gets a CPUID leaf.
1253 *
1254 * @param pVCpu The cross context virtual CPU structure.
1255 * @param uLeaf The CPUID leaf to get.
1256 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1257 * @param pEax Where to store the EAX value.
1258 * @param pEbx Where to store the EBX value.
1259 * @param pEcx Where to store the ECX value.
1260 * @param pEdx Where to store the EDX value.
1261 */
1262VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1263 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1264{
1265 bool fExactSubLeafHit;
1266 PVM pVM = pVCpu->CTX_SUFF(pVM);
1267 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1268 if (pLeaf)
1269 {
1270 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1271 if (fExactSubLeafHit)
1272 {
1273 *pEax = pLeaf->uEax;
1274 *pEbx = pLeaf->uEbx;
1275 *pEcx = pLeaf->uEcx;
1276 *pEdx = pLeaf->uEdx;
1277
1278 /*
1279 * Deal with CPU specific information.
1280 */
1281 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
1282 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
1283 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
1284 {
1285 if (uLeaf == 1)
1286 {
1287 /* EBX: Bits 31-24: Initial APIC ID. */
1288 Assert(pVCpu->idCpu <= 255);
1289 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1290 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1291
1292 /* EDX: Bit 9: AND with APICBASE.EN. */
1293 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1294 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1295
1296 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1297 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1298 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1299 }
1300 else if (uLeaf == 0xb)
1301 {
1302 /* EDX: Initial extended APIC ID. */
1303 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1304 *pEdx = pVCpu->idCpu;
1305 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
1306 }
1307 else if (uLeaf == UINT32_C(0x8000001e))
1308 {
1309 /* EAX: Initial extended APIC ID. */
1310 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1311 *pEax = pVCpu->idCpu;
1312 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
1313 }
1314 else if (uLeaf == UINT32_C(0x80000001))
1315 {
1316 /* EDX: Bit 9: AND with APICBASE.EN. */
1317 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
1318 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1319 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
1320 }
1321 else
1322 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1323 }
1324 }
1325 /*
1326 * Out of range sub-leaves aren't quite as easy or pretty to emulate,
1327 * but we do the best we can here...
1328 */
1329 else
1330 {
1331 *pEax = *pEbx = *pEcx = *pEdx = 0;
1332 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1333 {
1334 *pEcx = uSubLeaf & 0xff;
1335 *pEdx = pVCpu->idCpu;
1336 }
1337 }
1338 }
1339 else
1340 {
1341 /*
1342 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1343 */
1344 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1345 {
1346 default:
1347 AssertFailed();
1348 case CPUMUNKNOWNCPUID_DEFAULTS:
1349 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1350 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1351 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1352 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1353 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1354 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1355 break;
1356 case CPUMUNKNOWNCPUID_PASSTHRU:
1357 *pEax = uLeaf;
1358 *pEbx = 0;
1359 *pEcx = uSubLeaf;
1360 *pEdx = 0;
1361 break;
1362 }
1363 }
1364 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1365}
1366
1367
1368/**
1369 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1370 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1371 *
1372 * @returns Previous value.
1373 * @param pVCpu The cross context virtual CPU structure to make the
1374 * change on. Usually the calling EMT.
1375 * @param fVisible Whether to make it visible (true) or hide it (false).
1376 */
1377VMM_INT_DECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1378{
1379 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1380 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1381
1382#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1383 /*
1384 * Patch manager saved state legacy pain.
1385 */
1386 PVM pVM = pVCpu->CTX_SUFF(pVM);
1387 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1388 if (pLeaf)
1389 {
1390 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1391 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx;
1392 else
1393 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx & ~X86_CPUID_FEATURE_EDX_APIC;
1394 }
1395
1396 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1397 if (pLeaf)
1398 {
1399 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1400 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx;
1401 else
1402 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx & ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1403 }
1404#endif
1405
1406 return fOld;
1407}
1408
1409
1410/**
1411 * Gets the host CPU vendor.
1412 *
1413 * @returns CPU vendor.
1414 * @param pVM The cross context VM structure.
1415 */
1416VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1417{
1418 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1419}
1420
1421
1422/**
1423 * Gets the CPU vendor.
1424 *
1425 * @returns CPU vendor.
1426 * @param pVM The cross context VM structure.
1427 */
1428VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1429{
1430 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1431}
1432
1433
1434VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1435{
1436 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1437 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1438}
1439
1440
1441VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1442{
1443 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1444 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1445}
1446
1447
1448VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1449{
1450 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1451 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1452}
1453
1454
1455VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1456{
1457 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1458 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1459}
1460
1461
1462VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1463{
1464 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1465 return VINF_SUCCESS; /* No need to recalc. */
1466}
1467
1468
1469VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1470{
1471 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1472 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1473}
1474
1475
1476VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1477{
1478 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1479 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1480 if (iReg == 4 || iReg == 5)
1481 iReg += 2;
1482 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1483 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1484}
1485
1486
1487/**
1488 * Recalculates the hypervisor DRx register values based on current guest
1489 * registers and DBGF breakpoints, updating changed registers depending on the
1490 * context.
1491 *
1492 * This is called whenever a guest DRx register is modified (any context) and
1493 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1494 *
1495 * In raw-mode context this function will reload any (hyper) DRx registers that
1496 * come out with a different value. It may also have to save the host debug
1497 * registers if that hasn't been done already. In this context though, we'll
1498 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1499 * are only important when breakpoints are actually enabled.
1500 *
1501 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1502 * reloaded by the HM code if it changes. Furthermore, we will only use the
1503 * combined register set when the VBox debugger is actually using hardware BPs;
1504 * when it isn't, we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1505 * concern us here).
1506 *
1507 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1508 * all the time.
1509 *
1510 * @returns VINF_SUCCESS.
1511 * @param pVCpu The cross context virtual CPU structure.
1512 * @param iGstReg The guest debug register number that was modified.
1513 * UINT8_MAX if not guest register.
1514 * @param fForceHyper Used in HM to force hyper registers because of single
1515 * stepping.
1516 */
1517VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1518{
1519 PVM pVM = pVCpu->CTX_SUFF(pVM);
1520
1521 /*
1522 * Compare the DR7s first.
1523 *
1524 * We only care about the enabled flags. GD is virtualized when we
1525 * dispatch the #DB, we never enable it. The DBGF DR7 value will
1526 * always have the LE and GE bits set, so no need to check and disable
1527 * stuff if they're cleared like we have to for the guest DR7.
1528 */
1529 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1530 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1531 uGstDr7 = 0;
1532 else if (!(uGstDr7 & X86_DR7_LE))
1533 uGstDr7 &= ~X86_DR7_LE_ALL;
1534 else if (!(uGstDr7 & X86_DR7_GE))
1535 uGstDr7 &= ~X86_DR7_GE_ALL;
1536
1537 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1538
1539#ifdef IN_RING0
1540 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1541 fForceHyper = true;
1542#endif
1543 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
1544 {
1545 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1546#ifdef IN_RC
1547 bool const fHmEnabled = false;
1548#elif defined(IN_RING3)
1549 bool const fHmEnabled = HMIsEnabled(pVM);
1550#endif
1551
1552 /*
1553 * Ok, something is enabled. Recalc each of the breakpoints, taking
1554 * the VM debugger ones over the guest ones. In raw-mode context we will
1555 * not allow breakpoints with values inside the hypervisor area.
1556 */
1557 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1558
1559 /* bp 0 */
1560 RTGCUINTREG uNewDr0;
1561 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1562 {
1563 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1564 uNewDr0 = DBGFBpGetDR0(pVM);
1565 }
1566 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1567 {
1568 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1569#ifndef IN_RING0
1570 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1571 uNewDr0 = 0;
1572 else
1573#endif
1574 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1575 }
1576 else
1577 uNewDr0 = 0;
1578
1579 /* bp 1 */
1580 RTGCUINTREG uNewDr1;
1581 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1582 {
1583 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1584 uNewDr1 = DBGFBpGetDR1(pVM);
1585 }
1586 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1587 {
1588 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1589#ifndef IN_RING0
1590 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1591 uNewDr1 = 0;
1592 else
1593#endif
1594 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1595 }
1596 else
1597 uNewDr1 = 0;
1598
1599 /* bp 2 */
1600 RTGCUINTREG uNewDr2;
1601 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1602 {
1603 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1604 uNewDr2 = DBGFBpGetDR2(pVM);
1605 }
1606 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1607 {
1608 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1609#ifndef IN_RING0
1610 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1611 uNewDr2 = 0;
1612 else
1613#endif
1614 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1615 }
1616 else
1617 uNewDr2 = 0;
1618
1619 /* bp 3 */
1620 RTGCUINTREG uNewDr3;
1621 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1622 {
1623 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1624 uNewDr3 = DBGFBpGetDR3(pVM);
1625 }
1626 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1627 {
1628 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1629#ifndef IN_RING0
1630 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1631 uNewDr3 = 0;
1632 else
1633#endif
1634 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1635 }
1636 else
1637 uNewDr3 = 0;
1638
1639 /*
1640 * Apply the updates.
1641 */
1642#ifdef IN_RC
1643 /* Make sure to save host registers first. */
1644 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1645 {
1646 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1647 {
1648 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1649 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1650 }
1651 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1652 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1653 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1654 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1655 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1656
1657 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1658 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1659 ASMSetDR0(uNewDr0);
1660 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
1661 ASMSetDR1(uNewDr1);
1662 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
1663 ASMSetDR2(uNewDr2);
1664 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
1665 ASMSetDR3(uNewDr3);
1666 ASMSetDR6(X86_DR6_INIT_VAL);
1667 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
1668 ASMSetDR7(uNewDr7);
1669 }
1670 else
1671#endif
1672 {
1673 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1674 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1675 CPUMSetHyperDR3(pVCpu, uNewDr3);
1676 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1677 CPUMSetHyperDR2(pVCpu, uNewDr2);
1678 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1679 CPUMSetHyperDR1(pVCpu, uNewDr1);
1680 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1681 CPUMSetHyperDR0(pVCpu, uNewDr0);
1682 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1683 CPUMSetHyperDR7(pVCpu, uNewDr7);
1684 }
1685 }
1686#ifdef IN_RING0
1687 else if (CPUMIsGuestDebugStateActive(pVCpu))
1688 {
1689 /*
1690 * Reload the register that was modified. Normally this won't happen
1691 * as we won't intercept DRx writes when not having the hyper debug
1692 * state loaded, but in case we do for some reason we'll simply deal
1693 * with it.
1694 */
1695 switch (iGstReg)
1696 {
1697 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1698 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1699 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1700 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1701 default:
1702 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1703 }
1704 }
1705#endif
1706 else
1707 {
1708 /*
1709 * No active debug state any more. In raw-mode this means we have to
1710 * make sure DR7 has everything disabled now, if we armed it already.
1711 * In ring-0 we might end up here when just single stepping.
1712 */
1713#if defined(IN_RC) || defined(IN_RING0)
1714 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1715 {
1716# ifdef IN_RC
1717 ASMSetDR7(X86_DR7_INIT_VAL);
1718# endif
1719 if (pVCpu->cpum.s.Hyper.dr[0])
1720 ASMSetDR0(0);
1721 if (pVCpu->cpum.s.Hyper.dr[1])
1722 ASMSetDR1(0);
1723 if (pVCpu->cpum.s.Hyper.dr[2])
1724 ASMSetDR2(0);
1725 if (pVCpu->cpum.s.Hyper.dr[3])
1726 ASMSetDR3(0);
1727 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1728 }
1729#endif
1730 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1731
1732 /* Clear all the registers. */
1733 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1734 pVCpu->cpum.s.Hyper.dr[3] = 0;
1735 pVCpu->cpum.s.Hyper.dr[2] = 0;
1736 pVCpu->cpum.s.Hyper.dr[1] = 0;
1737 pVCpu->cpum.s.Hyper.dr[0] = 0;
1738
1739 }
1740 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1741 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1742 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1743 pVCpu->cpum.s.Hyper.dr[7]));
1744
1745 return VINF_SUCCESS;
1746}
1747
1748
1749/**
1750 * Set the guest XCR0 register.
1751 *
1752 * Will load additional state if the FPU state is already loaded (in ring-0 &
1753 * raw-mode context).
1754 *
1755 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1756 * value.
1757 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1758 * @param uNewValue The new value.
1759 * @thread EMT(pVCpu)
1760 */
1761VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
1762{
1763 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1764 /* The X87 bit cannot be cleared. */
1765 && (uNewValue & XSAVE_C_X87)
1766 /* AVX requires SSE. */
1767 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1768 /* AVX-512 requires YMM, SSE and all three of its own components to be enabled. */
1769 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1770 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1771 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1772 )
1773 {
1774 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1775
1776 /* If more state components are enabled, we need to take care to load
1777 them if the FPU/SSE state is already loaded. May otherwise leak
1778 host state to the guest. */
1779 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1780 if (fNewComponents)
1781 {
1782#if defined(IN_RING0) || defined(IN_RC)
1783 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1784 {
1785 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1786 /* Adding more components. */
1787 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
1788 else
1789 {
1790 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1791 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1792 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1793 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1794 }
1795 }
1796#endif
1797 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1798 }
1799 return VINF_SUCCESS;
1800 }
1801 return VERR_CPUM_RAISE_GP_0;
1802}
1803
1804
1805/**
1806 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1807 *
1808 * @returns true if NXE is enabled, otherwise false.
1809 * @param pVCpu The cross context virtual CPU structure.
1810 */
1811VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
1812{
1813 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1814}
1815
1816
1817/**
1818 * Tests if the guest has the Page Size Extension enabled (PSE).
1819 *
1820 * @returns true if the guest can use large pages (PSE or PAE set), otherwise false.
1821 * @param pVCpu The cross context virtual CPU structure.
1822 */
1823VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
1824{
1825 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1826 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1827}
1828
1829
1830/**
1831 * Tests if the guest has the paging enabled (PG).
1832 *
1833 * @returns true if paging is enabled, otherwise false.
1834 * @param pVCpu The cross context virtual CPU structure.
1835 */
1836VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
1837{
1838 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1839}
1840
1841
1842/**
1843 * Tests if the guest has write protection of ring-0 pages enabled (CR0.WP).
1844 *
1845 * @returns true if CR0.WP is set, otherwise false.
1846 * @param pVCpu The cross context virtual CPU structure.
1847 */
1848VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
1849{
1850 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1851}
1852
1853
1854/**
1855 * Tests if the guest is running in real mode or not.
1856 *
1857 * @returns true if in real mode, otherwise false.
1858 * @param pVCpu The cross context virtual CPU structure.
1859 */
1860VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
1861{
1862 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1863}
1864
1865
1866/**
1867 * Tests if the guest is running in real or virtual 8086 mode.
1868 *
1869 * @returns @c true if it is, @c false if not.
1870 * @param pVCpu The cross context virtual CPU structure.
1871 */
1872VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
1873{
1874 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1875 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1876}
1877
1878
1879/**
1880 * Tests if the guest is running in protected mode or not.
1881 *
1882 * @returns true if in protected mode, otherwise false.
1883 * @param pVCpu The cross context virtual CPU structure.
1884 */
1885VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
1886{
1887 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1888}
1889
1890
1891/**
1892 * Tests if the guest is running in paged protected mode or not.
1893 *
1894 * @returns true if in paged protected mode, otherwise false.
1895 * @param pVCpu The cross context virtual CPU structure.
1896 */
1897VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
1898{
1899 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1900}
1901
1902
1903/**
1904 * Tests if the guest is running in long mode or not.
1905 *
1906 * @returns true if in long mode, otherwise false.
1907 * @param pVCpu The cross context virtual CPU structure.
1908 */
1909VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
1910{
1911 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1912}
1913
1914
1915/**
1916 * Tests if the guest is running in PAE mode or not.
1917 *
1918 * @returns true if in PAE mode, otherwise false.
1919 * @param pVCpu The cross context virtual CPU structure.
1920 */
1921VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
1922{
1923 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1924 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1925 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1926 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1927 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1928}
1929
1930
1931/**
1932 * Tests if the guest is running in 64-bit mode or not.
1933 *
1934 * @returns true if in 64-bit protected mode, otherwise false.
1935 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1936 */
1937VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1938{
1939 if (!CPUMIsGuestInLongMode(pVCpu))
1940 return false;
1941 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1942 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1943}
1944
1945
1946/**
1947 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
1948 * registers.
1949 *
1950 * @returns true if in 64-bit protected mode, otherwise false.
1951 * @param pCtx Pointer to the current guest CPU context.
1952 */
1953VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
1954{
1955 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
1956}
1957
1958#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1959
1960/**
1961 * Tests whether we have entered raw-mode and ring compression is in effect.
1962 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
1963 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
1964 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1965 */
1966VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
1967{
1968 return pVCpu->cpum.s.fRawEntered;
1969}
1970
1971/**
1972 * Transforms the guest CPU state to raw-ring mode.
1973 *
1974 * This function will change the CS and SS selectors with RPL=0 to RPL=1.
1975 *
1976 * @returns VBox status code. (recompiler failure)
1977 * @param pVCpu The cross context virtual CPU structure.
1978 * @see @ref pg_raw
1979 */
1980VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
1981{
1982 PVM pVM = pVCpu->CTX_SUFF(pVM);
1983
1984 Assert(!pVCpu->cpum.s.fRawEntered);
1985 Assert(!pVCpu->cpum.s.fRemEntered);
1986 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1987
1988 /*
1989 * Are we in Ring-0?
1990 */
1991 if ( pCtx->ss.Sel
1992 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
1993 && !pCtx->eflags.Bits.u1VM)
1994 {
1995 /*
1996 * Enter execution mode.
1997 */
1998 PATMRawEnter(pVM, pCtx);
1999
2000 /*
2001 * Set CPL to Ring-1.
2002 */
2003 pCtx->ss.Sel |= 1;
2004 if ( pCtx->cs.Sel
2005 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2006 pCtx->cs.Sel |= 1;
2007 }
2008 else
2009 {
2010# ifdef VBOX_WITH_RAW_RING1
2011 if ( EMIsRawRing1Enabled(pVM)
2012 && !pCtx->eflags.Bits.u1VM
2013 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2014 {
2015 /* Set CPL to Ring-2. */
2016 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2017 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2018 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2019 }
2020# else
2021 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2022 ("ring-1 code not supported\n"));
2023# endif
2024 /*
2025 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2026 */
2027 PATMRawEnter(pVM, pCtx);
2028 }
2029
2030 /*
2031 * Assert sanity.
2032 */
2033 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2034 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2035 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2036 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
2037
2038 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2039
2040 pVCpu->cpum.s.fRawEntered = true;
2041 return VINF_SUCCESS;
2042}
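
/*
 * Illustrative sketch (hypothetical selector values, not part of the original
 * source): what the ring compression above does for a guest that is in ring-0
 * when we enter raw-mode.
 *
 *      before CPUMRawEnter:  cs=0x0008 (RPL=0)  ss=0x0010 (RPL=0)  CPL=0
 *      after  CPUMRawEnter:  cs=0x0009 (RPL=1)  ss=0x0011 (RPL=1)  "CPL"=1
 *
 * CPUMRawLeave() below undoes this, so neither the guest nor the rest of the
 * VMM observes the compressed ring once we are back.
 */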
2043
2044
2045/**
2046 * Transforms the guest CPU state from raw-ring mode to correct values.
2047 *
2048 * This function will change any selector registers with RPL=1 back to RPL=0.
2049 *
2050 * @returns Adjusted rc.
2051 * @param pVCpu The cross context virtual CPU structure.
2052 * @param rc Raw mode return code.
2053 * @see @ref pg_raw
2054 */
2055VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2056{
2057 PVM pVM = pVCpu->CTX_SUFF(pVM);
2058
2059 /*
2060 * Don't leave if we've already left (in RC).
2061 */
2062 Assert(!pVCpu->cpum.s.fRemEntered);
2063 if (!pVCpu->cpum.s.fRawEntered)
2064 return rc;
2065 pVCpu->cpum.s.fRawEntered = false;
2066
2067 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2068 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2069 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2070 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2071
2072 /*
2073 * Are we executing in raw ring-1?
2074 */
2075 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2076 && !pCtx->eflags.Bits.u1VM)
2077 {
2078 /*
2079 * Leave execution mode.
2080 */
2081 PATMRawLeave(pVM, pCtx, rc);
2082 /* Not quite sure if this is really required, but it shouldn't harm (too much anyway). */
2083 /** @todo See what happens if we remove this. */
2084 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2085 pCtx->ds.Sel &= ~X86_SEL_RPL;
2086 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2087 pCtx->es.Sel &= ~X86_SEL_RPL;
2088 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2089 pCtx->fs.Sel &= ~X86_SEL_RPL;
2090 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2091 pCtx->gs.Sel &= ~X86_SEL_RPL;
2092
2093 /*
2094 * Ring-1 selector => Ring-0.
2095 */
2096 pCtx->ss.Sel &= ~X86_SEL_RPL;
2097 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2098 pCtx->cs.Sel &= ~X86_SEL_RPL;
2099 }
2100 else
2101 {
2102 /*
2103 * PATM is taking care of the IOPL and IF flags for us.
2104 */
2105 PATMRawLeave(pVM, pCtx, rc);
2106 if (!pCtx->eflags.Bits.u1VM)
2107 {
2108# ifdef VBOX_WITH_RAW_RING1
2109 if ( EMIsRawRing1Enabled(pVM)
2110 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2111 {
2112 /* Not quite sure if this is really required, but it shouldn't harm (too much anyway). */
2113 /** @todo See what happens if we remove this. */
2114 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2115 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2116 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2117 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2118 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2119 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2120 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2121 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2122
2123 /*
2124 * Ring-2 selector => Ring-1.
2125 */
2126 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2127 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2128 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2129 }
2130 else
2131 {
2132# endif
2133 /** @todo See what happens if we remove this. */
2134 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2135 pCtx->ds.Sel &= ~X86_SEL_RPL;
2136 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2137 pCtx->es.Sel &= ~X86_SEL_RPL;
2138 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2139 pCtx->fs.Sel &= ~X86_SEL_RPL;
2140 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2141 pCtx->gs.Sel &= ~X86_SEL_RPL;
2142# ifdef VBOX_WITH_RAW_RING1
2143 }
2144# endif
2145 }
2146 }
2147
2148 return rc;
2149}
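
/*
 * Illustrative sketch (hypothetical caller, not part of the original source):
 * the expected pairing of the two functions above around a raw-mode execution
 * burst.  The actual call sites are elsewhere in the VMM; cpumExampleRunRaw()
 * and runGuestCodeRaw() are made-up names.
 *
 *  static int cpumExampleRunRaw(PVMCPU pVCpu)
 *  {
 *      int rc = CPUMRawEnter(pVCpu);       // compress guest ring-0 to ring-1
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = runGuestCodeRaw(pVCpu);    // hypothetical execution step
 *          rc = CPUMRawLeave(pVCpu, rc);   // undo compression, keep the status
 *      }
 *      return rc;
 *  }
 */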
2150
2151#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2152
2153/**
2154 * Updates the EFLAGS while we're in raw-mode.
2155 *
2156 * @param pVCpu The cross context virtual CPU structure.
2157 * @param fEfl The new EFLAGS value.
2158 */
2159VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2160{
2161#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2162 if (pVCpu->cpum.s.fRawEntered)
2163 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2164 else
2165#endif
2166 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2167}
2168
2169
2170/**
2171 * Gets the EFLAGS while we're in raw-mode.
2172 *
2173 * @returns The eflags.
2174 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2175 */
2176VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2177{
2178#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2179 if (pVCpu->cpum.s.fRawEntered)
2180 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2181#endif
2182 return pVCpu->cpum.s.Guest.eflags.u32;
2183}
2184
2185
2186/**
2187 * Sets the specified changed flags (CPUM_CHANGED_*).
2188 *
2189 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2190 * @param fChangedAdd The changed flags to add.
2191 */
2192VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2193{
2194 pVCpu->cpum.s.fChanged |= fChangedAdd;
2195}
2196
2197
2198/**
2199 * Checks if the host CPU supports the XSAVE and XRSTOR instructions.
2200 *
2201 * @returns true if supported.
2202 * @returns false if not supported.
2203 * @param pVM The cross context VM structure.
2204 */
2205VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2206{
2207 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2208}
2209
2210
2211/**
2212 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2213 * @returns true if used.
2214 * @returns false if not used.
2215 * @param pVM The cross context VM structure.
2216 */
2217VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2218{
2219 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2220}
2221
2222
2223/**
2224 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2225 * @returns true if used.
2226 * @returns false if not used.
2227 * @param pVM The cross context VM structure.
2228 */
2229VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2230{
2231 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2232}
2233
2234#ifdef IN_RC
2235
2236/**
2237 * Lazily sync in the FPU/XMM state.
2238 *
2239 * @returns VBox status code.
2240 * @param pVCpu The cross context virtual CPU structure.
2241 */
2242VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2243{
2244 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2245}
2246
2247#endif /* IN_RC */
2248
2249/**
2250 * Checks if we activated the FPU/XMM state of the guest OS.
2251 *
2252 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
2253 * time we'll be executing guest code, so it may return true for 64-on-32 when
2254 * we still haven't actually loaded the FPU state, but have merely scheduled it
2255 * to be loaded the next time we go through the world switcher (CPUM_SYNC_FPU_STATE).
2256 *
2257 * @returns true / false.
2258 * @param pVCpu The cross context virtual CPU structure.
2259 */
2260VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2261{
2262 return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
2263}
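
/*
 * Illustrative sketch (hypothetical situation, not part of the original
 * source): how this query differs from CPUMIsGuestFPUStateLoaded() below on a
 * 64-on-32 setup where the load has only been scheduled, i.e.
 * CPUM_SYNC_FPU_STATE is set but CPUM_USED_FPU_GUEST is still clear:
 *
 *      CPUMIsGuestFPUStateActive(pVCpu)  -> true   (loaded on the next world switch)
 *      CPUMIsGuestFPUStateLoaded(pVCpu)  -> false  (nothing actually loaded yet)
 */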
2264
2265
2266/**
2267 * Checks if we've really loaded the FPU/XMM state of the guest OS.
2268 *
2269 * @returns true / false.
2270 * @param pVCpu The cross context virtual CPU structure.
2271 */
2272VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
2273{
2274 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
2275}
2276
2277
2278/**
2279 * Checks if we saved the FPU/XMM state of the host OS.
2280 *
2281 * @returns true / false.
2282 * @param pVCpu The cross context virtual CPU structure.
2283 */
2284VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
2285{
2286 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
2287}
2288
2289
2290/**
2291 * Checks if the guest debug state is active.
2292 *
2293 * @returns boolean
2294 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2295 */
2296VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2297{
2298 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2299}
2300
2301
2302/**
2303 * Checks if the guest debug state is to be made active during the world-switch
2304 * (currently only used for the 32->64 switcher case).
2305 *
2306 * @returns boolean
2307 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2308 */
2309VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2310{
2311 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2312}
2313
2314
2315/**
2316 * Checks if the hyper debug state is active.
2317 *
2318 * @returns boolean
2319 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2320 */
2321VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2322{
2323 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2324}
2325
2326
2327/**
2328 * Checks if the hyper debug state is to be made active during the world-switch
2329 * (currently only used for the 32->64 switcher case).
2330 *
2331 * @returns boolean
2332 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2333 */
2334VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2335{
2336 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2337}
2338
2339
2340/**
2341 * Mark the guest's debug state as inactive.
2342 *
2343 * @returns boolean
2344 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2345 * @todo This API doesn't make sense any more.
2346 */
2347VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2348{
2349 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2350 NOREF(pVCpu);
2351}
2352
2353
2354/**
2355 * Get the current privilege level of the guest.
2356 *
2357 * @returns CPL
2358 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2359 */
2360VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2361{
2362 /*
2363 * CPL can reliably be found in SS.DPL when the hidden registers are valid, otherwise in the SS selector RPL.
2364 *
2365 * Note! We used to check CS.DPL here, assuming it was always equal to
2366 * CPL even if a conforming segment was loaded. But this turned out to
2367 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2368 * during install after a far call to ring 2 with VT-x. Then on newer
2369 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2370 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2371 *
2372 * So, forget CS.DPL, always use SS.DPL.
2373 *
2374 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2375 * isn't necessarily equal if the segment is conforming.
2376 * See section 4.11.1 in the AMD manual.
2377 *
2378 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2379 * right after a real->prot mode switch and when in V8086 mode? That
2380 * section says the RPL specified in a direct transfer (call, jmp,
2381 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2382 * it would be impossible for an exception handler or the iret
2383 * instruction to figure out whether SS:ESP are part of the frame
2384 * or not. A VBox or qemu bug must've led to this misconception.
2385 *
2386 * Update2: On an AMD Bulldozer system here, I've had no trouble loading a null
2387 * selector into SS with an RPL other than the CPL when CPL != 3 and
2388 * we're in 64-bit mode. The Intel dev box doesn't allow this; only
2389 * RPL = CPL works. Weird.
2390 */
2391 uint32_t uCpl;
2392 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2393 {
2394 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2395 {
2396 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2397 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2398 else
2399 {
2400 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2401#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2402# ifdef VBOX_WITH_RAW_RING1
2403 if (pVCpu->cpum.s.fRawEntered)
2404 {
2405 if ( uCpl == 2
2406 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2407 uCpl = 1;
2408 else if (uCpl == 1)
2409 uCpl = 0;
2410 }
2411 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2412# else
2413 if (uCpl == 1)
2414 uCpl = 0;
2415# endif
2416#endif
2417 }
2418 }
2419 else
2420 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2421 }
2422 else
2423 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2424 return uCpl;
2425}
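
/*
 * Illustrative sketch (hypothetical helper, not part of the original source):
 * a typical consumer of the query above, e.g. the privilege check performed
 * before emulating an instruction that #GPs unless executed at CPL 0.
 *
 *  static bool cpumExampleMayExecutePrivileged(PVMCPU pVCpu)
 *  {
 *      return CPUMGetGuestCPL(pVCpu) == 0;     // ring-0 only
 *  }
 */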
2426
2427
2428/**
2429 * Gets the current guest CPU mode.
2430 *
2431 * If paging mode is what you need, check out PGMGetGuestMode().
2432 *
2433 * @returns The CPU mode.
2434 * @param pVCpu The cross context virtual CPU structure.
2435 */
2436VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2437{
2438 CPUMMODE enmMode;
2439 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2440 enmMode = CPUMMODE_REAL;
2441 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2442 enmMode = CPUMMODE_PROTECTED;
2443 else
2444 enmMode = CPUMMODE_LONG;
2445
2446 return enmMode;
2447}
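
/*
 * Illustrative sketch (hypothetical helper, not part of the original source):
 * dispatching on the mode returned above.  Note that CPUMMODE_LONG only says
 * EFER.LMA is set; whether the current code segment is actually 64-bit is
 * answered by CPUMIsGuestIn64BitCode() / CPUMGetGuestCodeBits().
 *
 *  static void cpumExampleLogGuestMode(PVMCPU pVCpu)
 *  {
 *      switch (CPUMGetGuestMode(pVCpu))
 *      {
 *          case CPUMMODE_REAL:      Log(("Guest CPU mode: real\n")); break;
 *          case CPUMMODE_PROTECTED: Log(("Guest CPU mode: protected\n")); break;
 *          case CPUMMODE_LONG:      Log(("Guest CPU mode: long\n")); break;
 *          default:                 AssertFailed(); break;
 *      }
 *  }
 */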
2448
2449
2450/**
2451 * Figures out whether the guest CPU is currently executing 16, 32 or 64-bit code.
2452 *
2453 * @returns 16, 32 or 64.
2454 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2455 */
2456VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2457{
2458 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2459 return 16;
2460
2461 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2462 {
2463 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2464 return 16;
2465 }
2466
2467 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2468 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2469 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2470 return 64;
2471
2472 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2473 return 32;
2474
2475 return 16;
2476}
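
/*
 * Illustrative sketch (hypothetical helper, not part of the original source):
 * the bit count returned above maps directly onto the disassembler mode
 * returned by CPUMGetGuestDisMode() below, since both apply the same
 * CR0.PE / EFLAGS.VM / CS.L / CS.D rules.
 *
 *  static DISCPUMODE cpumExampleBitsToDisMode(uint32_t cBits)
 *  {
 *      switch (cBits)
 *      {
 *          case 64:    return DISCPUMODE_64BIT;
 *          case 32:    return DISCPUMODE_32BIT;
 *          default:    return DISCPUMODE_16BIT;    // 16-bit, real and V86 code
 *      }
 *  }
 */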
2477
2478
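/**
 * Gets the disassembler CPU mode corresponding to the current guest code.
 *
 * Same rules as CPUMGetGuestCodeBits(), just expressed as a DISCPUMODE value.
 *
 * @returns DISCPUMODE_16BIT, DISCPUMODE_32BIT or DISCPUMODE_64BIT.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */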
2479VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2480{
2481 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2482 return DISCPUMODE_16BIT;
2483
2484 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2485 {
2486 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2487 return DISCPUMODE_16BIT;
2488 }
2489
2490 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2491 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2492 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2493 return DISCPUMODE_64BIT;
2494
2495 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2496 return DISCPUMODE_32BIT;
2497
2498 return DISCPUMODE_16BIT;
2499}
2500