VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 62440

Last change on this file since 62440 was 62440, checked in by vboxsync, 8 years ago

VMM: More MSC level 4 warning fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 71.3 KB
Line 
1/* $Id: CPUMAllRegs.cpp 62440 2016-07-22 13:14:01Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/pdm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
49# pragma optimize("y", off)
50#endif
51
52AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
53AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
54
55
56/*********************************************************************************************************************************
57* Defined Constants And Macros *
58*********************************************************************************************************************************/
59/**
60 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
61 *
62 * @returns Pointer to the Virtual CPU.
63 * @param a_pGuestCtx Pointer to the guest context.
64 */
65#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
66
67/**
68 * Lazily loads the hidden parts of a selector register when using raw-mode.
69 */
70#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
71# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
72 do \
73 { \
74 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
75 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
76 } while (0)
77#else
78# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
79 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
80#endif
81
82
83
84#ifdef VBOX_WITH_RAW_MODE_NOT_R0
85
86/**
87 * Does the lazy hidden selector register loading.
88 *
89 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
90 * @param pSReg The selector register to lazily load hidden parts of.
91 */
92static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
93{
94 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
95 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
96 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
97
98 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
99 {
100 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
101 pSReg->Attr.u = 0;
102 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
103 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
104 pSReg->Attr.n.u2Dpl = 3;
105 pSReg->Attr.n.u1Present = 1;
106 pSReg->u32Limit = 0x0000ffff;
107 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
108 pSReg->ValidSel = pSReg->Sel;
109 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
110 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
111 }
112 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
113 {
114 /* Real mode - leave the limit and flags alone here, at least for now. */
115 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
116 pSReg->ValidSel = pSReg->Sel;
117 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
118 }
119 else
120 {
121 /* Protected mode - get it from the selector descriptor tables. */
122 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
123 {
124 Assert(!CPUMIsGuestInLongMode(pVCpu));
125 pSReg->Sel = 0;
126 pSReg->u64Base = 0;
127 pSReg->u32Limit = 0;
128 pSReg->Attr.u = 0;
129 pSReg->ValidSel = 0;
130 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
131 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
132 }
133 else
134 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
135 }
136}
137
138
139/**
140 * Makes sure the hidden CS and SS selector registers are valid, loading them if
141 * necessary.
142 *
143 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
144 */
145VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
146{
147 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
148 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
149}
150
151
152/**
153 * Loads a the hidden parts of a selector register.
154 *
155 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
156 */
157VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
158{
159 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
160}
161
162#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
163
164
165/**
166 * Obsolete.
167 *
168 * We don't support nested hypervisor context interrupts or traps. Life is much
169 * simpler when we don't. It's also slightly faster at times.
170 *
171 * @param pVCpu The cross context virtual CPU structure.
172 */
173VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
174{
175 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
176}
177
178
179/**
180 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
181 *
182 * @param pVCpu The cross context virtual CPU structure.
183 */
184VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
185{
186 return &pVCpu->cpum.s.Hyper;
187}
188
189
190VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
191{
192 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
193 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
194}
195
196
197VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
198{
199 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
200 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
201}
202
203
204VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
205{
206 pVCpu->cpum.s.Hyper.cr3 = cr3;
207
208#ifdef IN_RC
209 /* Update the current CR3. */
210 ASMSetCR3(cr3);
211#endif
212}
213
214VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
215{
216 return pVCpu->cpum.s.Hyper.cr3;
217}
218
219
220VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
221{
222 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
223}
224
225
226VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
227{
228 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
229}
230
231
232VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
233{
234 pVCpu->cpum.s.Hyper.es.Sel = SelES;
235}
236
237
238VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
239{
240 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
241}
242
243
244VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
245{
246 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
247}
248
249
250VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
251{
252 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
253}
254
255
256VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
257{
258 pVCpu->cpum.s.Hyper.esp = u32ESP;
259}
260
261
262VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32ESP)
263{
264 pVCpu->cpum.s.Hyper.esp = u32ESP;
265}
266
267
268VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
269{
270 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
271 return VINF_SUCCESS;
272}
273
274
275VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
276{
277 pVCpu->cpum.s.Hyper.eip = u32EIP;
278}
279
280
281/**
282 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
283 * EFLAGS and EIP prior to resuming guest execution.
284 *
285 * All general register not given as a parameter will be set to 0. The EFLAGS
286 * register will be set to sane values for C/C++ code execution with interrupts
287 * disabled and IOPL 0.
288 *
289 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
290 * @param u32EIP The EIP value.
291 * @param u32ESP The ESP value.
292 * @param u32EAX The EAX value.
293 * @param u32EDX The EDX value.
294 */
295VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
296{
297 pVCpu->cpum.s.Hyper.eip = u32EIP;
298 pVCpu->cpum.s.Hyper.esp = u32ESP;
299 pVCpu->cpum.s.Hyper.eax = u32EAX;
300 pVCpu->cpum.s.Hyper.edx = u32EDX;
301 pVCpu->cpum.s.Hyper.ecx = 0;
302 pVCpu->cpum.s.Hyper.ebx = 0;
303 pVCpu->cpum.s.Hyper.ebp = 0;
304 pVCpu->cpum.s.Hyper.esi = 0;
305 pVCpu->cpum.s.Hyper.edi = 0;
306 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
307}
308
309
310VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
311{
312 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
313}
314
315
316VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
317{
318 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
319}
320
321
322/** @def MAYBE_LOAD_DRx
323 * Macro for updating DRx values in raw-mode and ring-0 contexts.
324 */
325#ifdef IN_RING0
326# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
327# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
328 do { \
329 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
330 a_fnLoad(a_uValue); \
331 else \
332 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
333 } while (0)
334# else
335# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
336 do { \
337 a_fnLoad(a_uValue); \
338 } while (0)
339# endif
340
341#elif defined(IN_RC)
342# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
343 do { \
344 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
345 { a_fnLoad(a_uValue); } \
346 } while (0)
347
348#else
349# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
350#endif
351
352VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
353{
354 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
355 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
356}
357
358
359VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
360{
361 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
362 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
363}
364
365
366VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
367{
368 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
369 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
370}
371
372
373VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
374{
375 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
376 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
377}
378
379
380VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
381{
382 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
383}
384
385
386VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
387{
388 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
389#ifdef IN_RC
390 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
391#endif
392}
393
394
395VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
396{
397 return pVCpu->cpum.s.Hyper.cs.Sel;
398}
399
400
401VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
402{
403 return pVCpu->cpum.s.Hyper.ds.Sel;
404}
405
406
407VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
408{
409 return pVCpu->cpum.s.Hyper.es.Sel;
410}
411
412
413VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
414{
415 return pVCpu->cpum.s.Hyper.fs.Sel;
416}
417
418
419VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
420{
421 return pVCpu->cpum.s.Hyper.gs.Sel;
422}
423
424
425VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
426{
427 return pVCpu->cpum.s.Hyper.ss.Sel;
428}
429
430
431VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
432{
433 return pVCpu->cpum.s.Hyper.eax;
434}
435
436
437VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
438{
439 return pVCpu->cpum.s.Hyper.ebx;
440}
441
442
443VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
444{
445 return pVCpu->cpum.s.Hyper.ecx;
446}
447
448
449VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
450{
451 return pVCpu->cpum.s.Hyper.edx;
452}
453
454
455VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
456{
457 return pVCpu->cpum.s.Hyper.esi;
458}
459
460
461VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
462{
463 return pVCpu->cpum.s.Hyper.edi;
464}
465
466
467VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
468{
469 return pVCpu->cpum.s.Hyper.ebp;
470}
471
472
473VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
474{
475 return pVCpu->cpum.s.Hyper.esp;
476}
477
478
479VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
480{
481 return pVCpu->cpum.s.Hyper.eflags.u32;
482}
483
484
485VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
486{
487 return pVCpu->cpum.s.Hyper.eip;
488}
489
490
491VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
492{
493 return pVCpu->cpum.s.Hyper.rip;
494}
495
496
497VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
498{
499 if (pcbLimit)
500 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
501 return pVCpu->cpum.s.Hyper.idtr.pIdt;
502}
503
504
505VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
506{
507 if (pcbLimit)
508 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
509 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
510}
511
512
513VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
514{
515 return pVCpu->cpum.s.Hyper.ldtr.Sel;
516}
517
518
519VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
520{
521 return pVCpu->cpum.s.Hyper.dr[0];
522}
523
524
525VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
526{
527 return pVCpu->cpum.s.Hyper.dr[1];
528}
529
530
531VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
532{
533 return pVCpu->cpum.s.Hyper.dr[2];
534}
535
536
537VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
538{
539 return pVCpu->cpum.s.Hyper.dr[3];
540}
541
542
543VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
544{
545 return pVCpu->cpum.s.Hyper.dr[6];
546}
547
548
549VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
550{
551 return pVCpu->cpum.s.Hyper.dr[7];
552}
553
554
555/**
556 * Gets the pointer to the internal CPUMCTXCORE structure.
557 * This is only for reading in order to save a few calls.
558 *
559 * @param pVCpu The cross context virtual CPU structure.
560 */
561VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
562{
563 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
564}
565
566
567/**
568 * Queries the pointer to the internal CPUMCTX structure.
569 *
570 * @returns The CPUMCTX pointer.
571 * @param pVCpu The cross context virtual CPU structure.
572 */
573VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
574{
575 return &pVCpu->cpum.s.Guest;
576}
577
578VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
579{
580#ifdef VBOX_WITH_RAW_MODE_NOT_R0
581 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
582 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
583#endif
584 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
585 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
586 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
587 return VINF_SUCCESS; /* formality, consider it void. */
588}
589
590VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
591{
592#ifdef VBOX_WITH_RAW_MODE_NOT_R0
593 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
594 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
595#endif
596 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
597 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
598 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
599 return VINF_SUCCESS; /* formality, consider it void. */
600}
601
602VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
603{
604#ifdef VBOX_WITH_RAW_MODE_NOT_R0
605 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
606 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
607#endif
608 pVCpu->cpum.s.Guest.tr.Sel = tr;
609 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
610 return VINF_SUCCESS; /* formality, consider it void. */
611}
612
613VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
614{
615#ifdef VBOX_WITH_RAW_MODE_NOT_R0
616 if ( ( ldtr != 0
617 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
618 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
619 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
620#endif
621 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
622 /* The caller will set more hidden bits if it has them. */
623 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
624 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
625 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
626 return VINF_SUCCESS; /* formality, consider it void. */
627}
628
629
630/**
631 * Set the guest CR0.
632 *
633 * When called in GC, the hyper CR0 may be updated if that is
634 * required. The caller only has to take special action if AM,
635 * WP, PG or PE changes.
636 *
637 * @returns VINF_SUCCESS (consider it void).
638 * @param pVCpu The cross context virtual CPU structure.
639 * @param cr0 The new CR0 value.
640 */
641VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
642{
643#ifdef IN_RC
644 /*
645 * Check if we need to change hypervisor CR0 because
646 * of math stuff.
647 */
648 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
649 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
650 {
651 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
652 {
653 /*
654 * We haven't loaded the guest FPU state yet, so TS and MT are both set
655 * and EM should be reflecting the guest EM (it always does this).
656 */
657 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
658 {
659 uint32_t HyperCR0 = ASMGetCR0();
660 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
661 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
662 HyperCR0 &= ~X86_CR0_EM;
663 HyperCR0 |= cr0 & X86_CR0_EM;
664 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
665 ASMSetCR0(HyperCR0);
666 }
667# ifdef VBOX_STRICT
668 else
669 {
670 uint32_t HyperCR0 = ASMGetCR0();
671 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
672 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
673 }
674# endif
675 }
676 else
677 {
678 /*
679 * Already loaded the guest FPU state, so we're just mirroring
680 * the guest flags.
681 */
682 uint32_t HyperCR0 = ASMGetCR0();
683 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
684 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
685 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
686 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
687 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
688 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
689 ASMSetCR0(HyperCR0);
690 }
691 }
692#endif /* IN_RC */
693
694 /*
695 * Check for changes causing TLB flushes (for REM).
696 * The caller is responsible for calling PGM when appropriate.
697 */
698 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
699 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
700 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
701 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
702
703 /*
704 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
705 */
706 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
707 PGMCr0WpEnabled(pVCpu);
708
709 /* The ET flag is settable on a 386 and hardwired on 486+. */
710 if ( !(cr0 & X86_CR0_ET)
711 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
712 cr0 |= X86_CR0_ET;
713
714 pVCpu->cpum.s.Guest.cr0 = cr0;
715 return VINF_SUCCESS;
716}
717
718
719VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
720{
721 pVCpu->cpum.s.Guest.cr2 = cr2;
722 return VINF_SUCCESS;
723}
724
725
726VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
727{
728 pVCpu->cpum.s.Guest.cr3 = cr3;
729 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
735{
736 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
737
738 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
739 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
740 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
741
742 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
743 pVCpu->cpum.s.Guest.cr4 = cr4;
744 return VINF_SUCCESS;
745}
746
747
748VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
749{
750 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
751 return VINF_SUCCESS;
752}
753
754
755VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
756{
757 pVCpu->cpum.s.Guest.eip = eip;
758 return VINF_SUCCESS;
759}
760
761
762VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
763{
764 pVCpu->cpum.s.Guest.eax = eax;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
770{
771 pVCpu->cpum.s.Guest.ebx = ebx;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
777{
778 pVCpu->cpum.s.Guest.ecx = ecx;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
784{
785 pVCpu->cpum.s.Guest.edx = edx;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
791{
792 pVCpu->cpum.s.Guest.esp = esp;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
798{
799 pVCpu->cpum.s.Guest.ebp = ebp;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
805{
806 pVCpu->cpum.s.Guest.esi = esi;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
812{
813 pVCpu->cpum.s.Guest.edi = edi;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
819{
820 pVCpu->cpum.s.Guest.ss.Sel = ss;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
826{
827 pVCpu->cpum.s.Guest.cs.Sel = cs;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
833{
834 pVCpu->cpum.s.Guest.ds.Sel = ds;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
840{
841 pVCpu->cpum.s.Guest.es.Sel = es;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
847{
848 pVCpu->cpum.s.Guest.fs.Sel = fs;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
854{
855 pVCpu->cpum.s.Guest.gs.Sel = gs;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
861{
862 pVCpu->cpum.s.Guest.msrEFER = val;
863}
864
865
866VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
867{
868 if (pcbLimit)
869 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
870 return pVCpu->cpum.s.Guest.idtr.pIdt;
871}
872
873
874VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
875{
876 if (pHidden)
877 *pHidden = pVCpu->cpum.s.Guest.tr;
878 return pVCpu->cpum.s.Guest.tr.Sel;
879}
880
881
882VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
883{
884 return pVCpu->cpum.s.Guest.cs.Sel;
885}
886
887
888VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
889{
890 return pVCpu->cpum.s.Guest.ds.Sel;
891}
892
893
894VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
895{
896 return pVCpu->cpum.s.Guest.es.Sel;
897}
898
899
900VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
901{
902 return pVCpu->cpum.s.Guest.fs.Sel;
903}
904
905
906VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
907{
908 return pVCpu->cpum.s.Guest.gs.Sel;
909}
910
911
912VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
913{
914 return pVCpu->cpum.s.Guest.ss.Sel;
915}
916
917
918VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
919{
920 return pVCpu->cpum.s.Guest.ldtr.Sel;
921}
922
923
924VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
925{
926 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
927 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
928 return pVCpu->cpum.s.Guest.ldtr.Sel;
929}
930
931
932VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
933{
934 return pVCpu->cpum.s.Guest.cr0;
935}
936
937
938VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
939{
940 return pVCpu->cpum.s.Guest.cr2;
941}
942
943
944VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
945{
946 return pVCpu->cpum.s.Guest.cr3;
947}
948
949
950VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
951{
952 return pVCpu->cpum.s.Guest.cr4;
953}
954
955
956VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
957{
958 uint64_t u64;
959 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
960 if (RT_FAILURE(rc))
961 u64 = 0;
962 return u64;
963}
964
965
966VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
967{
968 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
969}
970
971
972VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
973{
974 return pVCpu->cpum.s.Guest.eip;
975}
976
977
978VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
979{
980 return pVCpu->cpum.s.Guest.rip;
981}
982
983
984VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
985{
986 return pVCpu->cpum.s.Guest.eax;
987}
988
989
990VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
991{
992 return pVCpu->cpum.s.Guest.ebx;
993}
994
995
996VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
997{
998 return pVCpu->cpum.s.Guest.ecx;
999}
1000
1001
1002VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1003{
1004 return pVCpu->cpum.s.Guest.edx;
1005}
1006
1007
1008VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1009{
1010 return pVCpu->cpum.s.Guest.esi;
1011}
1012
1013
1014VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1015{
1016 return pVCpu->cpum.s.Guest.edi;
1017}
1018
1019
1020VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1021{
1022 return pVCpu->cpum.s.Guest.esp;
1023}
1024
1025
1026VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1027{
1028 return pVCpu->cpum.s.Guest.ebp;
1029}
1030
1031
1032VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1033{
1034 return pVCpu->cpum.s.Guest.eflags.u32;
1035}
1036
1037
1038VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1039{
1040 switch (iReg)
1041 {
1042 case DISCREG_CR0:
1043 *pValue = pVCpu->cpum.s.Guest.cr0;
1044 break;
1045
1046 case DISCREG_CR2:
1047 *pValue = pVCpu->cpum.s.Guest.cr2;
1048 break;
1049
1050 case DISCREG_CR3:
1051 *pValue = pVCpu->cpum.s.Guest.cr3;
1052 break;
1053
1054 case DISCREG_CR4:
1055 *pValue = pVCpu->cpum.s.Guest.cr4;
1056 break;
1057
1058 case DISCREG_CR8:
1059 {
1060 uint8_t u8Tpr;
1061 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1062 if (RT_FAILURE(rc))
1063 {
1064 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1065 *pValue = 0;
1066 return rc;
1067 }
1068 *pValue = u8Tpr >> 4; /* bits 7-4 contain the task priority that go in cr8, bits 3-0*/
1069 break;
1070 }
1071
1072 default:
1073 return VERR_INVALID_PARAMETER;
1074 }
1075 return VINF_SUCCESS;
1076}
1077
1078
1079VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1080{
1081 return pVCpu->cpum.s.Guest.dr[0];
1082}
1083
1084
1085VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1086{
1087 return pVCpu->cpum.s.Guest.dr[1];
1088}
1089
1090
1091VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1092{
1093 return pVCpu->cpum.s.Guest.dr[2];
1094}
1095
1096
1097VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1098{
1099 return pVCpu->cpum.s.Guest.dr[3];
1100}
1101
1102
1103VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1104{
1105 return pVCpu->cpum.s.Guest.dr[6];
1106}
1107
1108
1109VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1110{
1111 return pVCpu->cpum.s.Guest.dr[7];
1112}
1113
1114
1115VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1116{
1117 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1118 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1119 if (iReg == 4 || iReg == 5)
1120 iReg += 2;
1121 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1122 return VINF_SUCCESS;
1123}
1124
1125
1126VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1127{
1128 return pVCpu->cpum.s.Guest.msrEFER;
1129}
1130
1131
1132/**
1133 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1134 *
1135 * @returns Pointer to the leaf if found, NULL if not.
1136 *
1137 * @param pVM The cross context VM structure.
1138 * @param uLeaf The leaf to get.
1139 */
1140PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1141{
1142 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1143 if (iEnd)
1144 {
1145 unsigned iStart = 0;
1146 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1147 for (;;)
1148 {
1149 unsigned i = iStart + (iEnd - iStart) / 2U;
1150 if (uLeaf < paLeaves[i].uLeaf)
1151 {
1152 if (i <= iStart)
1153 return NULL;
1154 iEnd = i;
1155 }
1156 else if (uLeaf > paLeaves[i].uLeaf)
1157 {
1158 i += 1;
1159 if (i >= iEnd)
1160 return NULL;
1161 iStart = i;
1162 }
1163 else
1164 {
1165 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1166 return &paLeaves[i];
1167
1168 /* This shouldn't normally happen. But in case the it does due
1169 to user configuration overrids or something, just return the
1170 first sub-leaf. */
1171 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1172 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1173 while ( paLeaves[i].uSubLeaf != 0
1174 && i > 0
1175 && uLeaf == paLeaves[i - 1].uLeaf)
1176 i--;
1177 return &paLeaves[i];
1178 }
1179 }
1180 }
1181
1182 return NULL;
1183}
1184
1185
1186/**
1187 * Looks up a CPUID leaf in the CPUID leaf array.
1188 *
1189 * @returns Pointer to the leaf if found, NULL if not.
1190 *
1191 * @param pVM The cross context VM structure.
1192 * @param uLeaf The leaf to get.
1193 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1194 * isn't.
1195 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1196 */
1197PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1198{
1199 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1200 if (iEnd)
1201 {
1202 unsigned iStart = 0;
1203 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1204 for (;;)
1205 {
1206 unsigned i = iStart + (iEnd - iStart) / 2U;
1207 if (uLeaf < paLeaves[i].uLeaf)
1208 {
1209 if (i <= iStart)
1210 return NULL;
1211 iEnd = i;
1212 }
1213 else if (uLeaf > paLeaves[i].uLeaf)
1214 {
1215 i += 1;
1216 if (i >= iEnd)
1217 return NULL;
1218 iStart = i;
1219 }
1220 else
1221 {
1222 uSubLeaf &= paLeaves[i].fSubLeafMask;
1223 if (uSubLeaf == paLeaves[i].uSubLeaf)
1224 *pfExactSubLeafHit = true;
1225 else
1226 {
1227 /* Find the right subleaf. We return the last one before
1228 uSubLeaf if we don't find an exact match. */
1229 if (uSubLeaf < paLeaves[i].uSubLeaf)
1230 while ( i > 0
1231 && uLeaf == paLeaves[i - 1].uLeaf
1232 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1233 i--;
1234 else
1235 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1236 && uLeaf == paLeaves[i + 1].uLeaf
1237 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1238 i++;
1239 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1240 }
1241 return &paLeaves[i];
1242 }
1243 }
1244 }
1245
1246 *pfExactSubLeafHit = false;
1247 return NULL;
1248}
1249
1250
1251/**
1252 * Gets a CPUID leaf.
1253 *
1254 * @param pVCpu The cross context virtual CPU structure.
1255 * @param uLeaf The CPUID leaf to get.
1256 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1257 * @param pEax Where to store the EAX value.
1258 * @param pEbx Where to store the EBX value.
1259 * @param pEcx Where to store the ECX value.
1260 * @param pEdx Where to store the EDX value.
1261 */
1262VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1263 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1264{
1265 bool fExactSubLeafHit;
1266 PVM pVM = pVCpu->CTX_SUFF(pVM);
1267 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1268 if (pLeaf)
1269 {
1270 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1271 if (fExactSubLeafHit)
1272 {
1273 *pEax = pLeaf->uEax;
1274 *pEbx = pLeaf->uEbx;
1275 *pEcx = pLeaf->uEcx;
1276 *pEdx = pLeaf->uEdx;
1277
1278 /*
1279 * Deal with CPU specific information.
1280 */
1281 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
1282 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
1283 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
1284 {
1285 if (uLeaf == 1)
1286 {
1287 /* EBX: Bits 31-24: Initial APIC ID. */
1288 Assert(pVCpu->idCpu <= 255);
1289 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1290 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1291
1292 /* EDX: Bit 9: AND with APICBASE.EN. */
1293 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1294 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1295
1296 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1297 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1298 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1299 }
1300 else if (uLeaf == 0xb)
1301 {
1302 /* EDX: Initial extended APIC ID. */
1303 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1304 *pEdx = pVCpu->idCpu;
1305 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
1306 }
1307 else if (uLeaf == UINT32_C(0x8000001e))
1308 {
1309 /* EAX: Initial extended APIC ID. */
1310 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1311 *pEax = pVCpu->idCpu;
1312 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
1313 }
1314 else if (uLeaf == UINT32_C(0x80000001))
1315 {
1316 /* EDX: Bit 9: AND with APICBASE.EN. */
1317 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
1318 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1319 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
1320 }
1321 else
1322 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1323 }
1324 }
1325 /*
1326 * Out of range sub-leaves aren't quite as easy and pretty as we emulate
1327 * them here, but we do the best we can here...
1328 */
1329 else
1330 {
1331 *pEax = *pEbx = *pEcx = *pEdx = 0;
1332 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1333 {
1334 *pEcx = uSubLeaf & 0xff;
1335 *pEdx = pVCpu->idCpu;
1336 }
1337 }
1338 }
1339 else
1340 {
1341 /*
1342 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1343 */
1344 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1345 {
1346 default:
1347 AssertFailed();
1348 case CPUMUNKNOWNCPUID_DEFAULTS:
1349 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1350 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1351 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1352 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1353 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1354 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1355 break;
1356 case CPUMUNKNOWNCPUID_PASSTHRU:
1357 *pEax = uLeaf;
1358 *pEbx = 0;
1359 *pEcx = uSubLeaf;
1360 *pEdx = 0;
1361 break;
1362 }
1363 }
1364 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1365}
1366
1367
1368/**
1369 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1370 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1371 *
1372 * @returns Previous value.
1373 * @param pVCpu The cross context virtual CPU structure to make the
1374 * change on. Usually the calling EMT.
1375 * @param fVisible Whether to make it visible (true) or hide it (false).
1376 *
1377 * @remarks This is "VMMDECL" so that it still links with
1378 * the old APIC code which is in VBoxDD2 and not in
1379 * the VMM module.
1380 */
1381VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1382{
1383 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1384 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1385
1386#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1387 /*
1388 * Patch manager saved state legacy pain.
1389 */
1390 PVM pVM = pVCpu->CTX_SUFF(pVM);
1391 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1392 if (pLeaf)
1393 {
1394 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1395 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx;
1396 else
1397 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx & ~X86_CPUID_FEATURE_EDX_APIC;
1398 }
1399
1400 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1401 if (pLeaf)
1402 {
1403 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1404 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx;
1405 else
1406 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx & ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1407 }
1408#endif
1409
1410 return fOld;
1411}
1412
1413
1414/**
1415 * Gets the host CPU vendor.
1416 *
1417 * @returns CPU vendor.
1418 * @param pVM The cross context VM structure.
1419 */
1420VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1421{
1422 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1423}
1424
1425
1426/**
1427 * Gets the CPU vendor.
1428 *
1429 * @returns CPU vendor.
1430 * @param pVM The cross context VM structure.
1431 */
1432VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1433{
1434 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1435}
1436
1437
1438VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1439{
1440 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1441 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1442}
1443
1444
1445VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1446{
1447 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1448 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1449}
1450
1451
1452VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1453{
1454 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1455 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1456}
1457
1458
1459VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1460{
1461 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1462 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1463}
1464
1465
1466VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1467{
1468 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1469 return VINF_SUCCESS; /* No need to recalc. */
1470}
1471
1472
1473VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1474{
1475 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1476 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1477}
1478
1479
1480VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1481{
1482 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1483 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1484 if (iReg == 4 || iReg == 5)
1485 iReg += 2;
1486 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1487 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1488}
1489
1490
1491/**
1492 * Recalculates the hypervisor DRx register values based on current guest
1493 * registers and DBGF breakpoints, updating changed registers depending on the
1494 * context.
1495 *
1496 * This is called whenever a guest DRx register is modified (any context) and
1497 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1498 *
1499 * In raw-mode context this function will reload any (hyper) DRx registers which
1500 * comes out with a different value. It may also have to save the host debug
1501 * registers if that haven't been done already. In this context though, we'll
1502 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1503 * are only important when breakpoints are actually enabled.
1504 *
1505 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1506 * reloaded by the HM code if it changes. Further more, we will only use the
1507 * combined register set when the VBox debugger is actually using hardware BPs,
1508 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1509 * concern us here).
1510 *
1511 * In ring-3 we won't be loading anything, so well calculate hypervisor values
1512 * all the time.
1513 *
1514 * @returns VINF_SUCCESS.
1515 * @param pVCpu The cross context virtual CPU structure.
1516 * @param iGstReg The guest debug register number that was modified.
1517 * UINT8_MAX if not guest register.
1518 * @param fForceHyper Used in HM to force hyper registers because of single
1519 * stepping.
1520 */
1521VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1522{
1523 PVM pVM = pVCpu->CTX_SUFF(pVM);
1524
1525 /*
1526 * Compare the DR7s first.
1527 *
1528 * We only care about the enabled flags. GD is virtualized when we
1529 * dispatch the #DB, we never enable it. The DBGF DR7 value is will
1530 * always have the LE and GE bits set, so no need to check and disable
1531 * stuff if they're cleared like we have to for the guest DR7.
1532 */
1533 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1534 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1535 uGstDr7 = 0;
1536 else if (!(uGstDr7 & X86_DR7_LE))
1537 uGstDr7 &= ~X86_DR7_LE_ALL;
1538 else if (!(uGstDr7 & X86_DR7_GE))
1539 uGstDr7 &= ~X86_DR7_GE_ALL;
1540
1541 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1542
1543#ifdef IN_RING0
1544 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1545 fForceHyper = true;
1546#endif
1547 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
1548 {
1549 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1550#ifdef IN_RC
1551 bool const fHmEnabled = false;
1552#elif defined(IN_RING3)
1553 bool const fHmEnabled = HMIsEnabled(pVM);
1554#endif
1555
1556 /*
1557 * Ok, something is enabled. Recalc each of the breakpoints, taking
1558 * the VM debugger ones of the guest ones. In raw-mode context we will
1559 * not allow breakpoints with values inside the hypervisor area.
1560 */
1561 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1562
1563 /* bp 0 */
1564 RTGCUINTREG uNewDr0;
1565 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1566 {
1567 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1568 uNewDr0 = DBGFBpGetDR0(pVM);
1569 }
1570 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1571 {
1572 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1573#ifndef IN_RING0
1574 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1575 uNewDr0 = 0;
1576 else
1577#endif
1578 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1579 }
1580 else
1581 uNewDr0 = 0;
1582
1583 /* bp 1 */
1584 RTGCUINTREG uNewDr1;
1585 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1586 {
1587 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1588 uNewDr1 = DBGFBpGetDR1(pVM);
1589 }
1590 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1591 {
1592 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1593#ifndef IN_RING0
1594 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1595 uNewDr1 = 0;
1596 else
1597#endif
1598 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1599 }
1600 else
1601 uNewDr1 = 0;
1602
1603 /* bp 2 */
1604 RTGCUINTREG uNewDr2;
1605 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1606 {
1607 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1608 uNewDr2 = DBGFBpGetDR2(pVM);
1609 }
1610 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1611 {
1612 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1613#ifndef IN_RING0
1614 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1615 uNewDr2 = 0;
1616 else
1617#endif
1618 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1619 }
1620 else
1621 uNewDr2 = 0;
1622
1623 /* bp 3 */
1624 RTGCUINTREG uNewDr3;
1625 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1626 {
1627 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1628 uNewDr3 = DBGFBpGetDR3(pVM);
1629 }
1630 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1631 {
1632 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1633#ifndef IN_RING0
1634 if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1635 uNewDr3 = 0;
1636 else
1637#endif
1638 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1639 }
1640 else
1641 uNewDr3 = 0;
1642
1643 /*
1644 * Apply the updates.
1645 */
1646#ifdef IN_RC
1647 /* Make sure to save host registers first. */
1648 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1649 {
1650 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1651 {
1652 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1653 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1654 }
1655 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1656 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1657 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1658 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1659 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1660
1661 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1662 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1663 ASMSetDR0(uNewDr0);
1664 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
1665 ASMSetDR1(uNewDr1);
1666 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
1667 ASMSetDR2(uNewDr2);
1668 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
1669 ASMSetDR3(uNewDr3);
1670 ASMSetDR6(X86_DR6_INIT_VAL);
1671 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
1672 ASMSetDR7(uNewDr7);
1673 }
1674 else
1675#endif
1676 {
1677 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1678 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1679 CPUMSetHyperDR3(pVCpu, uNewDr3);
1680 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1681 CPUMSetHyperDR2(pVCpu, uNewDr2);
1682 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1683 CPUMSetHyperDR1(pVCpu, uNewDr1);
1684 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1685 CPUMSetHyperDR0(pVCpu, uNewDr0);
1686 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1687 CPUMSetHyperDR7(pVCpu, uNewDr7);
1688 }
1689 }
1690#ifdef IN_RING0
1691 else if (CPUMIsGuestDebugStateActive(pVCpu))
1692 {
1693 /*
1694 * Reload the register that was modified. Normally this won't happen
1695 * as we won't intercept DRx writes when not having the hyper debug
1696 * state loaded, but in case we do for some reason we'll simply deal
1697 * with it.
1698 */
1699 switch (iGstReg)
1700 {
1701 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1702 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1703 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1704 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1705 default:
1706 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1707 }
1708 }
1709#endif
1710 else
1711 {
1712 /*
1713 * No active debug state any more. In raw-mode this means we have to
1714 * make sure DR7 has everything disabled now, if we armed it already.
1715 * In ring-0 we might end up here when just single stepping.
1716 */
1717#if defined(IN_RC) || defined(IN_RING0)
1718 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1719 {
1720# ifdef IN_RC
1721 ASMSetDR7(X86_DR7_INIT_VAL);
1722# endif
1723 if (pVCpu->cpum.s.Hyper.dr[0])
1724 ASMSetDR0(0);
1725 if (pVCpu->cpum.s.Hyper.dr[1])
1726 ASMSetDR1(0);
1727 if (pVCpu->cpum.s.Hyper.dr[2])
1728 ASMSetDR2(0);
1729 if (pVCpu->cpum.s.Hyper.dr[3])
1730 ASMSetDR3(0);
1731 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1732 }
1733#endif
1734 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1735
1736 /* Clear all the registers. */
1737 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1738 pVCpu->cpum.s.Hyper.dr[3] = 0;
1739 pVCpu->cpum.s.Hyper.dr[2] = 0;
1740 pVCpu->cpum.s.Hyper.dr[1] = 0;
1741 pVCpu->cpum.s.Hyper.dr[0] = 0;
1742
1743 }
1744 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1745 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1746 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1747 pVCpu->cpum.s.Hyper.dr[7]));
1748
1749 return VINF_SUCCESS;
1750}
1751
1752
1753/**
1754 * Set the guest XCR0 register.
1755 *
1756 * Will load additional state if the FPU state is already loaded (in ring-0 &
1757 * raw-mode context).
1758 *
1759 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1760 * value.
1761 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1762 * @param uNewValue The new value.
1763 * @thread EMT(pVCpu)
1764 */
1765VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
1766{
1767 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1768 /* The X87 bit cannot be cleared. */
1769 && (uNewValue & XSAVE_C_X87)
1770 /* AVX requires SSE. */
1771 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1772 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1773 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1774 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1775 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1776 )
1777 {
1778 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1779
1780 /* If more state components are enabled, we need to take care to load
1781 them if the FPU/SSE state is already loaded. May otherwise leak
1782 host state to the guest. */
1783 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1784 if (fNewComponents)
1785 {
1786#if defined(IN_RING0) || defined(IN_RC)
1787 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1788 {
1789 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1790 /* Adding more components. */
1791 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
1792 else
1793 {
1794 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1795 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1796 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1797 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1798 }
1799 }
1800#endif
1801 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1802 }
1803 return VINF_SUCCESS;
1804 }
1805 return VERR_CPUM_RAISE_GP_0;
1806}
1807
1808
1809/**
1810 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1811 *
1812 * @returns true if in real mode, otherwise false.
1813 * @param pVCpu The cross context virtual CPU structure.
1814 */
1815VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
1816{
1817 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1818}
1819
1820
1821/**
1822 * Tests if the guest has the Page Size Extension enabled (PSE).
1823 *
1824 * @returns true if in real mode, otherwise false.
1825 * @param pVCpu The cross context virtual CPU structure.
1826 */
1827VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
1828{
1829 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1830 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1831}
1832
1833
1834/**
1835 * Tests if the guest has the paging enabled (PG).
1836 *
1837 * @returns true if in real mode, otherwise false.
1838 * @param pVCpu The cross context virtual CPU structure.
1839 */
1840VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
1841{
1842 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1843}
1844
1845
1846/**
1847 * Tests if the guest has the paging enabled (PG).
1848 *
1849 * @returns true if in real mode, otherwise false.
1850 * @param pVCpu The cross context virtual CPU structure.
1851 */
1852VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
1853{
1854 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1855}
1856
1857
1858/**
1859 * Tests if the guest is running in real mode or not.
1860 *
1861 * @returns true if in real mode, otherwise false.
1862 * @param pVCpu The cross context virtual CPU structure.
1863 */
1864VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
1865{
1866 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1867}
1868
1869
1870/**
1871 * Tests if the guest is running in real or virtual 8086 mode.
1872 *
1873 * @returns @c true if it is, @c false if not.
1874 * @param pVCpu The cross context virtual CPU structure.
1875 */
1876VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
1877{
1878 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1879 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1880}
1881
1882
1883/**
1884 * Tests if the guest is running in protected or not.
1885 *
1886 * @returns true if in protected mode, otherwise false.
1887 * @param pVCpu The cross context virtual CPU structure.
1888 */
1889VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
1890{
1891 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1892}
1893
1894
1895/**
1896 * Tests if the guest is running in paged protected or not.
1897 *
1898 * @returns true if in paged protected mode, otherwise false.
1899 * @param pVCpu The cross context virtual CPU structure.
1900 */
1901VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
1902{
1903 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1904}
1905
1906
1907/**
1908 * Tests if the guest is running in long mode or not.
1909 *
1910 * @returns true if in long mode, otherwise false.
1911 * @param pVCpu The cross context virtual CPU structure.
1912 */
1913VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
1914{
1915 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1916}
1917
1918
1919/**
1920 * Tests if the guest is running in PAE mode or not.
1921 *
1922 * @returns true if in PAE mode, otherwise false.
1923 * @param pVCpu The cross context virtual CPU structure.
1924 */
1925VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
1926{
1927 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1928 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1929 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1930 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1931 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1932}
1933
1934
1935/**
1936 * Tests if the guest is running in 64 bits mode or not.
1937 *
1938 * @returns true if in 64 bits protected mode, otherwise false.
1939 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1940 */
1941VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1942{
1943 if (!CPUMIsGuestInLongMode(pVCpu))
1944 return false;
1945 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1946 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1947}
1948
1949
1950/**
1951 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
1952 * registers.
1953 *
1954 * @returns true if in 64 bits protected mode, otherwise false.
1955 * @param pCtx Pointer to the current guest CPU context.
1956 */
1957VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
1958{
1959 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
1960}
1961
1962#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1963
1964/**
1965 *
1966 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
1967 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
1968 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1969 */
1970VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
1971{
1972 return pVCpu->cpum.s.fRawEntered;
1973}
1974
1975/**
1976 * Transforms the guest CPU state to raw-ring mode.
1977 *
1978 * This function will change the any of the cs and ss register with DPL=0 to DPL=1.
1979 *
1980 * @returns VBox status code. (recompiler failure)
1981 * @param pVCpu The cross context virtual CPU structure.
1982 * @see @ref pg_raw
1983 */
1984VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
1985{
1986 PVM pVM = pVCpu->CTX_SUFF(pVM);
1987
1988 Assert(!pVCpu->cpum.s.fRawEntered);
1989 Assert(!pVCpu->cpum.s.fRemEntered);
1990 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1991
1992 /*
1993 * Are we in Ring-0?
1994 */
1995 if ( pCtx->ss.Sel
1996 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
1997 && !pCtx->eflags.Bits.u1VM)
1998 {
1999 /*
2000 * Enter execution mode.
2001 */
2002 PATMRawEnter(pVM, pCtx);
2003
2004 /*
2005 * Set CPL to Ring-1.
2006 */
2007 pCtx->ss.Sel |= 1;
2008 if ( pCtx->cs.Sel
2009 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2010 pCtx->cs.Sel |= 1;
2011 }
2012 else
2013 {
2014# ifdef VBOX_WITH_RAW_RING1
2015 if ( EMIsRawRing1Enabled(pVM)
2016 && !pCtx->eflags.Bits.u1VM
2017 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2018 {
2019 /* Set CPL to Ring-2. */
2020 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2021 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2022 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2023 }
2024# else
2025 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2026 ("ring-1 code not supported\n"));
2027# endif
2028 /*
2029 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2030 */
2031 PATMRawEnter(pVM, pCtx);
2032 }
2033
2034 /*
2035 * Assert sanity.
2036 */
2037 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2038 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2039 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2040 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE));
2041
2042 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2043
2044 pVCpu->cpum.s.fRawEntered = true;
2045 return VINF_SUCCESS;
2046}
2047
2048
2049/**
2050 * Transforms the guest CPU state from raw-ring mode to correct values.
2051 *
2052 * This function will change any selector registers with DPL=1 to DPL=0.
2053 *
2054 * @returns Adjusted rc.
2055 * @param pVCpu The cross context virtual CPU structure.
2056 * @param rc Raw mode return code
2057 * @see @ref pg_raw
2058 */
2059VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2060{
2061 PVM pVM = pVCpu->CTX_SUFF(pVM);
2062
2063 /*
2064 * Don't leave if we've already left (in RC).
2065 */
2066 Assert(!pVCpu->cpum.s.fRemEntered);
2067 if (!pVCpu->cpum.s.fRawEntered)
2068 return rc;
2069 pVCpu->cpum.s.fRawEntered = false;
2070
2071 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2072 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2073 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2074 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2075
2076 /*
2077 * Are we executing in raw ring-1?
2078 */
2079 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2080 && !pCtx->eflags.Bits.u1VM)
2081 {
2082 /*
2083 * Leave execution mode.
2084 */
2085 PATMRawLeave(pVM, pCtx, rc);
2086 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2087 /** @todo See what happens if we remove this. */
2088 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2089 pCtx->ds.Sel &= ~X86_SEL_RPL;
2090 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2091 pCtx->es.Sel &= ~X86_SEL_RPL;
2092 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2093 pCtx->fs.Sel &= ~X86_SEL_RPL;
2094 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2095 pCtx->gs.Sel &= ~X86_SEL_RPL;
2096
2097 /*
2098 * Ring-1 selector => Ring-0.
2099 */
2100 pCtx->ss.Sel &= ~X86_SEL_RPL;
2101 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2102 pCtx->cs.Sel &= ~X86_SEL_RPL;
2103 }
2104 else
2105 {
2106 /*
2107 * PATM is taking care of the IOPL and IF flags for us.
2108 */
2109 PATMRawLeave(pVM, pCtx, rc);
2110 if (!pCtx->eflags.Bits.u1VM)
2111 {
2112# ifdef VBOX_WITH_RAW_RING1
2113 if ( EMIsRawRing1Enabled(pVM)
2114 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2115 {
2116 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2117 /** @todo See what happens if we remove this. */
2118 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2119 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2120 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2121 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2122 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2123 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2124 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2125 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2126
2127 /*
2128 * Ring-2 selector => Ring-1.
2129 */
2130 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2131 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2132 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2133 }
2134 else
2135 {
2136# endif
2137 /** @todo See what happens if we remove this. */
2138 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2139 pCtx->ds.Sel &= ~X86_SEL_RPL;
2140 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2141 pCtx->es.Sel &= ~X86_SEL_RPL;
2142 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2143 pCtx->fs.Sel &= ~X86_SEL_RPL;
2144 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2145 pCtx->gs.Sel &= ~X86_SEL_RPL;
2146# ifdef VBOX_WITH_RAW_RING1
2147 }
2148# endif
2149 }
2150 }
2151
2152 return rc;
2153}
2154
2155#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2156
2157/**
2158 * Updates the EFLAGS while we're in raw-mode.
2159 *
2160 * @param pVCpu The cross context virtual CPU structure.
2161 * @param fEfl The new EFLAGS value.
2162 */
2163VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2164{
2165#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2166 if (pVCpu->cpum.s.fRawEntered)
2167 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2168 else
2169#endif
2170 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2171}
2172
2173
2174/**
2175 * Gets the EFLAGS while we're in raw-mode.
2176 *
2177 * @returns The eflags.
2178 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2179 */
2180VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2181{
2182#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2183 if (pVCpu->cpum.s.fRawEntered)
2184 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2185#endif
2186 return pVCpu->cpum.s.Guest.eflags.u32;
2187}
2188
2189
2190/**
2191 * Sets the specified changed flags (CPUM_CHANGED_*).
2192 *
2193 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2194 * @param fChangedAdd The changed flags to add.
2195 */
2196VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2197{
2198 pVCpu->cpum.s.fChanged |= fChangedAdd;
2199}
2200
2201
2202/**
2203 * Checks if the CPU supports the XSAVE and XRSTOR instruction.
2204 *
2205 * @returns true if supported.
2206 * @returns false if not supported.
2207 * @param pVM The cross context VM structure.
2208 */
2209VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2210{
2211 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2212}
2213
2214
2215/**
2216 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2217 * @returns true if used.
2218 * @returns false if not used.
2219 * @param pVM The cross context VM structure.
2220 */
2221VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2222{
2223 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2224}
2225
2226
2227/**
2228 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2229 * @returns true if used.
2230 * @returns false if not used.
2231 * @param pVM The cross context VM structure.
2232 */
2233VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2234{
2235 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2236}
2237
#ifdef IN_RC

/**
 * Lazily sync in the FPU/XMM state.
 *
 * Raw-mode only; the actual work is done by the assembly worker
 * cpumHandleLazyFPUAsm.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
{
    return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
}

#endif /* IN_RC */
2252
2253/**
2254 * Checks if we activated the FPU/XMM state of the guest OS.
2255 *
2256 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
2257 * time we'll be executing guest code, so it may return true for 64-on-32 when
2258 * we still haven't actually loaded the FPU status, just scheduled it to be
2259 * loaded the next time we go thru the world switcher (CPUM_SYNC_FPU_STATE).
2260 *
2261 * @returns true / false.
2262 * @param pVCpu The cross context virtual CPU structure.
2263 */
2264VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2265{
2266 return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
2267}
2268
2269
2270/**
2271 * Checks if we've really loaded the FPU/XMM state of the guest OS.
2272 *
2273 * @returns true / false.
2274 * @param pVCpu The cross context virtual CPU structure.
2275 */
2276VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
2277{
2278 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
2279}
2280
2281
2282/**
2283 * Checks if we saved the FPU/XMM state of the host OS.
2284 *
2285 * @returns true / false.
2286 * @param pVCpu The cross context virtual CPU structure.
2287 */
2288VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
2289{
2290 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
2291}
2292
2293
2294/**
2295 * Checks if the guest debug state is active.
2296 *
2297 * @returns boolean
2298 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2299 */
2300VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2301{
2302 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2303}
2304
2305
2306/**
2307 * Checks if the guest debug state is to be made active during the world-switch
2308 * (currently only used for the 32->64 switcher case).
2309 *
2310 * @returns boolean
2311 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2312 */
2313VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2314{
2315 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2316}
2317
2318
2319/**
2320 * Checks if the hyper debug state is active.
2321 *
2322 * @returns boolean
2323 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2324 */
2325VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2326{
2327 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2328}
2329
2330
2331/**
2332 * Checks if the hyper debug state is to be made active during the world-switch
2333 * (currently only used for the 32->64 switcher case).
2334 *
2335 * @returns boolean
2336 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2337 */
2338VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2339{
2340 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2341}
2342
2343
/**
 * Mark the guest's debug state as inactive.
 *
 * In practice this is a debug-build sanity check only: it asserts that no
 * debug register state (guest, hyper or host) is currently marked as in use,
 * and is otherwise a no-op.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @todo This API doesn't make sense any more.
 */
VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
{
    Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
    NOREF(pVCpu); /* pVCpu is unreferenced in release builds. */
}
2356
2357
/**
 * Get the current privilege level of the guest.
 *
 * @returns CPL
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
{
    /*
     * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
     *
     * Note! We used to check CS.DPL here, assuming it was always equal to
     * CPL even if a conforming segment was loaded.  But this turned out to
     * only apply to older AMD-V.  With VT-x we had an ACP2 regression
     * during install after a far call to ring 2 with VT-x.  Then on newer
     * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
     * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
     *
     * So, forget CS.DPL, always use SS.DPL.
     *
     * Note! The SS RPL is always equal to the CPL, while the CS RPL
     * isn't necessarily equal if the segment is conforming.
     * See section 4.11.1 in the AMD manual.
     *
     * Update: Where the heck does it say CS.RPL can differ from CPL other than
     *         right after real->prot mode switch and when in V8086 mode?  That
     *         section says the RPL specified in a direct transfer (call, jmp,
     *         ret) is not the one loaded into CS.  Besides, if CS.RPL != CPL
     *         it would be impossible for an exception handler or the iret
     *         instruction to figure out whether SS:ESP are part of the frame
     *         or not.  VBox or qemu bug must've led to this misconception.
     *
     * Update2: On an AMD bulldozer system here, I've no trouble loading a null
     *         selector into SS with an RPL other than the CPL when CPL != 3 and
     *         we're in 64-bit mode.  The intel dev box doesn't allow this, on
     *         RPL = CPL.  Weird.
     */
    uint32_t uCpl;
    if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
    {
        if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
        {
            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
                uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
            else
            {
                /* Hidden parts not valid: fall back on the SS selector RPL. */
                uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
# ifdef VBOX_WITH_RAW_RING1
                /* Raw-mode runs guest ring-0 code in ring 1 (and, with raw
                   ring-1 enabled, guest ring-1 code in ring 2), so translate
                   the actual ring back to the guest-visible CPL. */
                if (pVCpu->cpum.s.fRawEntered)
                {
                    if (   uCpl == 2
                        && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
                        uCpl = 1;
                    else if (uCpl == 1)
                        uCpl = 0;
                }
                Assert(uCpl != 2);  /* ring 2 support not allowed anymore. */
# else
                /* Raw-mode without raw ring-1: ring 1 hosts guest ring-0 code. */
                if (uCpl == 1)
                    uCpl = 0;
# endif
#endif
            }
        }
        else
            uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
    }
    else
        uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
    return uCpl;
}
2430
2431
2432/**
2433 * Gets the current guest CPU mode.
2434 *
2435 * If paging mode is what you need, check out PGMGetGuestMode().
2436 *
2437 * @returns The CPU mode.
2438 * @param pVCpu The cross context virtual CPU structure.
2439 */
2440VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2441{
2442 CPUMMODE enmMode;
2443 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2444 enmMode = CPUMMODE_REAL;
2445 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2446 enmMode = CPUMMODE_PROTECTED;
2447 else
2448 enmMode = CPUMMODE_LONG;
2449
2450 return enmMode;
2451}
2452
2453
2454/**
2455 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
2456 *
2457 * @returns 16, 32 or 64.
2458 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2459 */
2460VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2461{
2462 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2463 return 16;
2464
2465 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2466 {
2467 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2468 return 16;
2469 }
2470
2471 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2472 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2473 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2474 return 64;
2475
2476 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2477 return 32;
2478
2479 return 16;
2480}
2481
2482
/**
 * Gets the disassembler CPU mode for the code the guest is currently
 * executing, using the same rules as CPUMGetGuestCodeBits().
 *
 * @returns DISCPUMODE_16BIT, DISCPUMODE_32BIT or DISCPUMODE_64BIT.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
{
    /* Real mode (CR0.PE clear) is always 16-bit. */
    if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
        return DISCPUMODE_16BIT;

    /* Virtual-8086 mode is 16-bit; it cannot coexist with long mode. */
    if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
    {
        Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
        return DISCPUMODE_16BIT;
    }

    /* Otherwise decide from CS: CS.L with EFER.LMA selects 64-bit, else CS.D
       picks 32 vs 16. */
    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
    if (   pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
        && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
        return DISCPUMODE_64BIT;

    if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
        return DISCPUMODE_32BIT;

    return DISCPUMODE_16BIT;
}
2504
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette