VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 64720

Last change on this file since 64720 was 64720, checked in by vboxsync, 8 years ago

DBGF: Added DBGFR3StepEx for simple step-over support as well as both step/trace to call, step/trace to return, step/trace to one instruction after return. Also added DBGFR3CpuIsInV86Code.

1/* $Id: CPUMAllRegs.cpp 64720 2016-11-20 02:00:02Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/apic.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
49# pragma optimize("y", off)
50#endif
51
52AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
53AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
54
55
56/*********************************************************************************************************************************
57* Defined Constants And Macros *
58*********************************************************************************************************************************/
59/**
60 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
61 *
62 * @returns Pointer to the Virtual CPU.
63 * @param a_pGuestCtx Pointer to the guest context.
64 */
65#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
66
67/**
68 * Lazily loads the hidden parts of a selector register when using raw-mode.
69 */
70#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
71# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
72 do \
73 { \
74 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
75 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
76 } while (0)
77#else
78# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
79 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
80#endif
81
82
83
84#ifdef VBOX_WITH_RAW_MODE_NOT_R0
85
86/**
87 * Does the lazy hidden selector register loading.
88 *
89 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
90 * @param pSReg The selector register to lazily load hidden parts of.
91 */
92static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
93{
94 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
95 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
96 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
97
98 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
99 {
100 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
101 pSReg->Attr.u = 0;
102 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
103 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
104 pSReg->Attr.n.u2Dpl = 3;
105 pSReg->Attr.n.u1Present = 1;
106 pSReg->u32Limit = 0x0000ffff;
107 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
108 pSReg->ValidSel = pSReg->Sel;
109 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
110 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
111 }
112 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
113 {
114 /* Real mode - leave the limit and flags alone here, at least for now. */
115 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
116 pSReg->ValidSel = pSReg->Sel;
117 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
118 }
119 else
120 {
121 /* Protected mode - get it from the selector descriptor tables. */
122 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
123 {
124 Assert(!CPUMIsGuestInLongMode(pVCpu));
125 pSReg->Sel = 0;
126 pSReg->u64Base = 0;
127 pSReg->u32Limit = 0;
128 pSReg->Attr.u = 0;
129 pSReg->ValidSel = 0;
130 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
131 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
132 }
133 else
134 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
135 }
136}
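/*
 * Editor's illustration (not part of the original file): how the real-mode /
 * V8086 base calculation above works out for a concrete selector.  The helper
 * name and the selector value are hypothetical.
 */
#if 0 /* illustrative only */
static void cpumExampleRealModeBase(void)
{
    uint16_t const uSel   = 0x1234;                /* hypothetical selector value */
    uint64_t const uBase  = (uint32_t)uSel << 4;   /* base = selector << 4, as done above */
    uint32_t const uLimit = 0x0000ffff;            /* fixed 64 KiB limit in V8086 mode */
    Assert(uBase == UINT64_C(0x12340));
    Assert(uBase + uLimit == UINT64_C(0x2233f));   /* last addressable byte of the segment */
}
#endif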
137
138
139/**
140 * Makes sure the hidden CS and SS selector registers are valid, loading them if
141 * necessary.
142 *
143 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
144 */
145VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
146{
147 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
148 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
149}
150
151
152/**
153 * Loads the hidden parts of a selector register.
154 *
155 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
156 */
157VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
158{
159 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
160}
161
162#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
163
164
165/**
166 * Obsolete.
167 *
168 * We don't support nested hypervisor context interrupts or traps. Life is much
169 * simpler when we don't. It's also slightly faster at times.
170 *
171 * @param pVCpu The cross context virtual CPU structure.
172 */
173VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
174{
175 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
176}
177
178
179/**
180 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
181 *
182 * @param pVCpu The cross context virtual CPU structure.
183 */
184VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
185{
186 return &pVCpu->cpum.s.Hyper;
187}
188
189
190VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
191{
192 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
193 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
194}
195
196
197VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
198{
199 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
200 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
201}
202
203
204VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
205{
206 pVCpu->cpum.s.Hyper.cr3 = cr3;
207
208#ifdef IN_RC
209 /* Update the current CR3. */
210 ASMSetCR3(cr3);
211#endif
212}
213
214VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
215{
216 return pVCpu->cpum.s.Hyper.cr3;
217}
218
219
220VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
221{
222 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
223}
224
225
226VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
227{
228 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
229}
230
231
232VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
233{
234 pVCpu->cpum.s.Hyper.es.Sel = SelES;
235}
236
237
238VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
239{
240 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
241}
242
243
244VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
245{
246 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
247}
248
249
250VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
251{
252 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
253}
254
255
256VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
257{
258 pVCpu->cpum.s.Hyper.esp = u32ESP;
259}
260
261
262VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
263{
264 pVCpu->cpum.s.Hyper.edx = u32EDX;
265}
266
267
268VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
269{
270 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
271 return VINF_SUCCESS;
272}
273
274
275VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
276{
277 pVCpu->cpum.s.Hyper.eip = u32EIP;
278}
279
280
281/**
282 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
283 * EFLAGS and EIP prior to resuming guest execution.
284 *
285 * All general registers not given as a parameter will be set to 0. The EFLAGS
286 * register will be set to sane values for C/C++ code execution with interrupts
287 * disabled and IOPL 0.
288 *
289 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
290 * @param u32EIP The EIP value.
291 * @param u32ESP The ESP value.
292 * @param u32EAX The EAX value.
293 * @param u32EDX The EDX value.
294 */
295VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
296{
297 pVCpu->cpum.s.Hyper.eip = u32EIP;
298 pVCpu->cpum.s.Hyper.esp = u32ESP;
299 pVCpu->cpum.s.Hyper.eax = u32EAX;
300 pVCpu->cpum.s.Hyper.edx = u32EDX;
301 pVCpu->cpum.s.Hyper.ecx = 0;
302 pVCpu->cpum.s.Hyper.ebx = 0;
303 pVCpu->cpum.s.Hyper.ebp = 0;
304 pVCpu->cpum.s.Hyper.esi = 0;
305 pVCpu->cpum.s.Hyper.edi = 0;
306 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
307}
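/*
 * Editor's sketch (not part of the original file): how a raw-mode caller might
 * use CPUMSetHyperState to prepare the hypervisor context before resuming
 * execution.  The helper name and the entry/stack addresses are hypothetical.
 */
#if 0 /* illustrative only */
static void cpumExampleSetHyperState(PVMCPU pVCpu)
{
    uint32_t const uEntryEip = UINT32_C(0xa0001000);   /* hypothetical raw-mode entry point */
    uint32_t const uStackEsp = UINT32_C(0xa000fff0);   /* hypothetical top of the raw-mode stack */
    CPUMSetHyperState(pVCpu, uEntryEip, uStackEsp, 0 /*u32EAX*/, 0 /*u32EDX*/);
    Assert(CPUMGetHyperEIP(pVCpu)    == uEntryEip);
    Assert(CPUMGetHyperESP(pVCpu)    == uStackEsp);
    Assert(CPUMGetHyperEFlags(pVCpu) == X86_EFL_1);    /* interrupts disabled, IOPL 0 */
}
#endif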
308
309
310VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
311{
312 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
313}
314
315
316VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
317{
318 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
319}
320
321
322/** @def MAYBE_LOAD_DRx
323 * Macro for updating DRx values in raw-mode and ring-0 contexts.
324 */
325#ifdef IN_RING0
326# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
327# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
328 do { \
329 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
330 a_fnLoad(a_uValue); \
331 else \
332 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
333 } while (0)
334# else
335# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
336 do { \
337 a_fnLoad(a_uValue); \
338 } while (0)
339# endif
340
341#elif defined(IN_RC)
342# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
343 do { \
344 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
345 { a_fnLoad(a_uValue); } \
346 } while (0)
347
348#else
349# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
350#endif
351
352VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
353{
354 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
355 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
356}
357
358
359VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
360{
361 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
362 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
363}
364
365
366VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
367{
368 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
369 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
370}
371
372
373VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
374{
375 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
376 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
377}
378
379
380VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
381{
382 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
383}
384
385
386VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
387{
388 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
389#ifdef IN_RC
390 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
391#endif
392}
393
394
395VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
396{
397 return pVCpu->cpum.s.Hyper.cs.Sel;
398}
399
400
401VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
402{
403 return pVCpu->cpum.s.Hyper.ds.Sel;
404}
405
406
407VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
408{
409 return pVCpu->cpum.s.Hyper.es.Sel;
410}
411
412
413VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
414{
415 return pVCpu->cpum.s.Hyper.fs.Sel;
416}
417
418
419VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
420{
421 return pVCpu->cpum.s.Hyper.gs.Sel;
422}
423
424
425VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
426{
427 return pVCpu->cpum.s.Hyper.ss.Sel;
428}
429
430
431VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
432{
433 return pVCpu->cpum.s.Hyper.eax;
434}
435
436
437VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
438{
439 return pVCpu->cpum.s.Hyper.ebx;
440}
441
442
443VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
444{
445 return pVCpu->cpum.s.Hyper.ecx;
446}
447
448
449VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
450{
451 return pVCpu->cpum.s.Hyper.edx;
452}
453
454
455VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
456{
457 return pVCpu->cpum.s.Hyper.esi;
458}
459
460
461VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
462{
463 return pVCpu->cpum.s.Hyper.edi;
464}
465
466
467VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
468{
469 return pVCpu->cpum.s.Hyper.ebp;
470}
471
472
473VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
474{
475 return pVCpu->cpum.s.Hyper.esp;
476}
477
478
479VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
480{
481 return pVCpu->cpum.s.Hyper.eflags.u32;
482}
483
484
485VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
486{
487 return pVCpu->cpum.s.Hyper.eip;
488}
489
490
491VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
492{
493 return pVCpu->cpum.s.Hyper.rip;
494}
495
496
497VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
498{
499 if (pcbLimit)
500 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
501 return pVCpu->cpum.s.Hyper.idtr.pIdt;
502}
503
504
505VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
506{
507 if (pcbLimit)
508 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
509 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
510}
511
512
513VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
514{
515 return pVCpu->cpum.s.Hyper.ldtr.Sel;
516}
517
518
519VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
520{
521 return pVCpu->cpum.s.Hyper.dr[0];
522}
523
524
525VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
526{
527 return pVCpu->cpum.s.Hyper.dr[1];
528}
529
530
531VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
532{
533 return pVCpu->cpum.s.Hyper.dr[2];
534}
535
536
537VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
538{
539 return pVCpu->cpum.s.Hyper.dr[3];
540}
541
542
543VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
544{
545 return pVCpu->cpum.s.Hyper.dr[6];
546}
547
548
549VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
550{
551 return pVCpu->cpum.s.Hyper.dr[7];
552}
553
554
555/**
556 * Gets the pointer to the internal CPUMCTXCORE structure.
557 * This is only for reading in order to save a few calls.
558 *
559 * @param pVCpu The cross context virtual CPU structure.
560 */
561VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
562{
563 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
564}
565
566
567/**
568 * Queries the pointer to the internal CPUMCTX structure.
569 *
570 * @returns The CPUMCTX pointer.
571 * @param pVCpu The cross context virtual CPU structure.
572 */
573VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
574{
575 return &pVCpu->cpum.s.Guest;
576}
577
578VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
579{
580#ifdef VBOX_WITH_RAW_MODE_NOT_R0
581 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
582 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
583#endif
584 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
585 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
586 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
587 return VINF_SUCCESS; /* formality, consider it void. */
588}
589
590VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
591{
592#ifdef VBOX_WITH_RAW_MODE_NOT_R0
593 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
594 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
595#endif
596 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
597 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
598 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
599 return VINF_SUCCESS; /* formality, consider it void. */
600}
601
602VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
603{
604#ifdef VBOX_WITH_RAW_MODE_NOT_R0
605 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
606 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
607#endif
608 pVCpu->cpum.s.Guest.tr.Sel = tr;
609 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
610 return VINF_SUCCESS; /* formality, consider it void. */
611}
612
613VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
614{
615#ifdef VBOX_WITH_RAW_MODE_NOT_R0
616 if ( ( ldtr != 0
617 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
618 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
619 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
620#endif
621 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
622 /* The caller will set more hidden bits if it has them. */
623 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
624 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
625 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
626 return VINF_SUCCESS; /* formality, consider it void. */
627}
628
629
630/**
631 * Set the guest CR0.
632 *
633 * When called in GC, the hyper CR0 may be updated if that is
634 * required. The caller only has to take special action if AM,
635 * WP, PG or PE changes.
636 *
637 * @returns VINF_SUCCESS (consider it void).
638 * @param pVCpu The cross context virtual CPU structure.
639 * @param cr0 The new CR0 value.
640 */
641VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
642{
643#ifdef IN_RC
644 /*
645 * Check if we need to change hypervisor CR0 because
646 * of math stuff.
647 */
648 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
649 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
650 {
651 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
652 {
653 /*
654 * We haven't loaded the guest FPU state yet, so TS and MT are both set
655 * and EM should be reflecting the guest EM (it always does this).
656 */
657 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
658 {
659 uint32_t HyperCR0 = ASMGetCR0();
660 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
661 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
662 HyperCR0 &= ~X86_CR0_EM;
663 HyperCR0 |= cr0 & X86_CR0_EM;
664 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
665 ASMSetCR0(HyperCR0);
666 }
667# ifdef VBOX_STRICT
668 else
669 {
670 uint32_t HyperCR0 = ASMGetCR0();
671 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
672 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
673 }
674# endif
675 }
676 else
677 {
678 /*
679 * Already loaded the guest FPU state, so we're just mirroring
680 * the guest flags.
681 */
682 uint32_t HyperCR0 = ASMGetCR0();
683 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
684 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
685 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
686 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
687 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
688 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
689 ASMSetCR0(HyperCR0);
690 }
691 }
692#endif /* IN_RC */
693
694 /*
695 * Check for changes causing TLB flushes (for REM).
696 * The caller is responsible for calling PGM when appropriate.
697 */
698 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
699 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
700 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
701 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
702
703 /*
704 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
705 */
706 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
707 PGMCr0WpEnabled(pVCpu);
708
709 /* The ET flag is settable on a 386 and hardwired on 486+. */
710 if ( !(cr0 & X86_CR0_ET)
711 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
712 cr0 |= X86_CR0_ET;
713
714 pVCpu->cpum.s.Guest.cr0 = cr0;
715 return VINF_SUCCESS;
716}
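/*
 * Editor's sketch (not part of the original file): toggling CR0.WP through
 * CPUMSetGuestCR0 marks the context for a global TLB flush, which the caller
 * is expected to act on (informing PGM/REM).  The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static void cpumExampleToggleGuestWp(PVMCPU pVCpu)
{
    uint64_t const cr0 = CPUMGetGuestCR0(pVCpu);
    CPUMSetGuestCR0(pVCpu, cr0 ^ X86_CR0_WP);       /* flip CR0.WP only */
    Assert(pVCpu->cpum.s.fChanged & CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    Assert(pVCpu->cpum.s.fChanged & CPUM_CHANGED_CR0);
    CPUMSetGuestCR0(pVCpu, cr0);                    /* restore the original value */
}
#endif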
717
718
719VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
720{
721 pVCpu->cpum.s.Guest.cr2 = cr2;
722 return VINF_SUCCESS;
723}
724
725
726VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
727{
728 pVCpu->cpum.s.Guest.cr3 = cr3;
729 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
730 return VINF_SUCCESS;
731}
732
733
734VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
735{
736 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
737
738 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
739 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
740 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
741
742 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
743 pVCpu->cpum.s.Guest.cr4 = cr4;
744 return VINF_SUCCESS;
745}
746
747
748VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
749{
750 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
751 return VINF_SUCCESS;
752}
753
754
755VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
756{
757 pVCpu->cpum.s.Guest.eip = eip;
758 return VINF_SUCCESS;
759}
760
761
762VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
763{
764 pVCpu->cpum.s.Guest.eax = eax;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
770{
771 pVCpu->cpum.s.Guest.ebx = ebx;
772 return VINF_SUCCESS;
773}
774
775
776VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
777{
778 pVCpu->cpum.s.Guest.ecx = ecx;
779 return VINF_SUCCESS;
780}
781
782
783VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
784{
785 pVCpu->cpum.s.Guest.edx = edx;
786 return VINF_SUCCESS;
787}
788
789
790VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
791{
792 pVCpu->cpum.s.Guest.esp = esp;
793 return VINF_SUCCESS;
794}
795
796
797VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
798{
799 pVCpu->cpum.s.Guest.ebp = ebp;
800 return VINF_SUCCESS;
801}
802
803
804VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
805{
806 pVCpu->cpum.s.Guest.esi = esi;
807 return VINF_SUCCESS;
808}
809
810
811VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
812{
813 pVCpu->cpum.s.Guest.edi = edi;
814 return VINF_SUCCESS;
815}
816
817
818VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
819{
820 pVCpu->cpum.s.Guest.ss.Sel = ss;
821 return VINF_SUCCESS;
822}
823
824
825VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
826{
827 pVCpu->cpum.s.Guest.cs.Sel = cs;
828 return VINF_SUCCESS;
829}
830
831
832VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
833{
834 pVCpu->cpum.s.Guest.ds.Sel = ds;
835 return VINF_SUCCESS;
836}
837
838
839VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
840{
841 pVCpu->cpum.s.Guest.es.Sel = es;
842 return VINF_SUCCESS;
843}
844
845
846VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
847{
848 pVCpu->cpum.s.Guest.fs.Sel = fs;
849 return VINF_SUCCESS;
850}
851
852
853VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
854{
855 pVCpu->cpum.s.Guest.gs.Sel = gs;
856 return VINF_SUCCESS;
857}
858
859
860VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
861{
862 pVCpu->cpum.s.Guest.msrEFER = val;
863}
864
865
866VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
867{
868 if (pcbLimit)
869 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
870 return pVCpu->cpum.s.Guest.idtr.pIdt;
871}
872
873
874VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
875{
876 if (pHidden)
877 *pHidden = pVCpu->cpum.s.Guest.tr;
878 return pVCpu->cpum.s.Guest.tr.Sel;
879}
880
881
882VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
883{
884 return pVCpu->cpum.s.Guest.cs.Sel;
885}
886
887
888VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
889{
890 return pVCpu->cpum.s.Guest.ds.Sel;
891}
892
893
894VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
895{
896 return pVCpu->cpum.s.Guest.es.Sel;
897}
898
899
900VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
901{
902 return pVCpu->cpum.s.Guest.fs.Sel;
903}
904
905
906VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
907{
908 return pVCpu->cpum.s.Guest.gs.Sel;
909}
910
911
912VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
913{
914 return pVCpu->cpum.s.Guest.ss.Sel;
915}
916
917
918VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
919{
920 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
921 if ( !CPUMIsGuestInLongMode(pVCpu)
922 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
923 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
924 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
925}
926
927
928VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
929{
930 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
931 if ( !CPUMIsGuestInLongMode(pVCpu)
932 || !pVCpu->cpum.s.Guest.ss.Attr.n.u1Long)
933 return pVCpu->cpum.s.Guest.esp + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
934 return pVCpu->cpum.s.Guest.rsp + pVCpu->cpum.s.Guest.ss.u64Base;
935}
936
937
938VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
939{
940 return pVCpu->cpum.s.Guest.ldtr.Sel;
941}
942
943
944VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
945{
946 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
947 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
948 return pVCpu->cpum.s.Guest.ldtr.Sel;
949}
950
951
952VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
953{
954 return pVCpu->cpum.s.Guest.cr0;
955}
956
957
958VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
959{
960 return pVCpu->cpum.s.Guest.cr2;
961}
962
963
964VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
965{
966 return pVCpu->cpum.s.Guest.cr3;
967}
968
969
970VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
971{
972 return pVCpu->cpum.s.Guest.cr4;
973}
974
975
976VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
977{
978 uint64_t u64;
979 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
980 if (RT_FAILURE(rc))
981 u64 = 0;
982 return u64;
983}
984
985
986VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
987{
988 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
989}
990
991
992VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
993{
994 return pVCpu->cpum.s.Guest.eip;
995}
996
997
998VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
999{
1000 return pVCpu->cpum.s.Guest.rip;
1001}
1002
1003
1004VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1005{
1006 return pVCpu->cpum.s.Guest.eax;
1007}
1008
1009
1010VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1011{
1012 return pVCpu->cpum.s.Guest.ebx;
1013}
1014
1015
1016VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1017{
1018 return pVCpu->cpum.s.Guest.ecx;
1019}
1020
1021
1022VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1023{
1024 return pVCpu->cpum.s.Guest.edx;
1025}
1026
1027
1028VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1029{
1030 return pVCpu->cpum.s.Guest.esi;
1031}
1032
1033
1034VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1035{
1036 return pVCpu->cpum.s.Guest.edi;
1037}
1038
1039
1040VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1041{
1042 return pVCpu->cpum.s.Guest.esp;
1043}
1044
1045
1046VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1047{
1048 return pVCpu->cpum.s.Guest.ebp;
1049}
1050
1051
1052VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1053{
1054 return pVCpu->cpum.s.Guest.eflags.u32;
1055}
1056
1057
1058VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1059{
1060 switch (iReg)
1061 {
1062 case DISCREG_CR0:
1063 *pValue = pVCpu->cpum.s.Guest.cr0;
1064 break;
1065
1066 case DISCREG_CR2:
1067 *pValue = pVCpu->cpum.s.Guest.cr2;
1068 break;
1069
1070 case DISCREG_CR3:
1071 *pValue = pVCpu->cpum.s.Guest.cr3;
1072 break;
1073
1074 case DISCREG_CR4:
1075 *pValue = pVCpu->cpum.s.Guest.cr4;
1076 break;
1077
1078 case DISCREG_CR8:
1079 {
1080 uint8_t u8Tpr;
1081 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1082 if (RT_FAILURE(rc))
1083 {
1084 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1085 *pValue = 0;
1086 return rc;
1087 }
1088 *pValue = u8Tpr >> 4; /* Bits 7-4 of the TPR contain the task priority that goes into CR8; bits 3-0 are dropped. */
1089 break;
1090 }
1091
1092 default:
1093 return VERR_INVALID_PARAMETER;
1094 }
1095 return VINF_SUCCESS;
1096}
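/*
 * Editor's illustration (not part of the original file): the TPR-to-CR8
 * conversion above keeps only bits 7-4 of the APIC task priority register.
 * The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static void cpumExampleTprToCr8(void)
{
    /* A TPR of 0x80 (priority class 8) reads back as CR8 = 8; the low nibble is dropped. */
    Assert((UINT8_C(0x80) >> 4) == 8);
    Assert((UINT8_C(0x8f) >> 4) == 8);
}
#endif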
1097
1098
1099VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1100{
1101 return pVCpu->cpum.s.Guest.dr[0];
1102}
1103
1104
1105VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1106{
1107 return pVCpu->cpum.s.Guest.dr[1];
1108}
1109
1110
1111VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1112{
1113 return pVCpu->cpum.s.Guest.dr[2];
1114}
1115
1116
1117VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1118{
1119 return pVCpu->cpum.s.Guest.dr[3];
1120}
1121
1122
1123VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1124{
1125 return pVCpu->cpum.s.Guest.dr[6];
1126}
1127
1128
1129VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1130{
1131 return pVCpu->cpum.s.Guest.dr[7];
1132}
1133
1134
1135VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1136{
1137 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1138 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1139 if (iReg == 4 || iReg == 5)
1140 iReg += 2;
1141 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1142 return VINF_SUCCESS;
1143}
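/*
 * Editor's illustration (not part of the original file): the DR4/DR5 aliasing
 * above means that asking for register 4 or 5 returns DR6 or DR7 respectively.
 * The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static void cpumExampleDrxAlias(PVMCPU pVCpu)
{
    uint64_t uValue4 = 0, uValue6 = 0;
    int rc = CPUMGetGuestDRx(pVCpu, 4, &uValue4);   /* alias: actually reads DR6 */
    rc |= CPUMGetGuestDRx(pVCpu, 6, &uValue6);
    Assert(rc == VINF_SUCCESS && uValue4 == uValue6);
}
#endif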
1144
1145
1146VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1147{
1148 return pVCpu->cpum.s.Guest.msrEFER;
1149}
1150
1151
1152/**
1153 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1154 *
1155 * @returns Pointer to the leaf if found, NULL if not.
1156 *
1157 * @param pVM The cross context VM structure.
1158 * @param uLeaf The leaf to get.
1159 */
1160PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1161{
1162 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1163 if (iEnd)
1164 {
1165 unsigned iStart = 0;
1166 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1167 for (;;)
1168 {
1169 unsigned i = iStart + (iEnd - iStart) / 2U;
1170 if (uLeaf < paLeaves[i].uLeaf)
1171 {
1172 if (i <= iStart)
1173 return NULL;
1174 iEnd = i;
1175 }
1176 else if (uLeaf > paLeaves[i].uLeaf)
1177 {
1178 i += 1;
1179 if (i >= iEnd)
1180 return NULL;
1181 iStart = i;
1182 }
1183 else
1184 {
1185 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1186 return &paLeaves[i];
1187
1188 /* This shouldn't normally happen. But in case it does due
1189 to user configuration overrides or something, just return the
1190 first sub-leaf. */
1191 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1192 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1193 while ( paLeaves[i].uSubLeaf != 0
1194 && i > 0
1195 && uLeaf == paLeaves[i - 1].uLeaf)
1196 i--;
1197 return &paLeaves[i];
1198 }
1199 }
1200 }
1201
1202 return NULL;
1203}
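/*
 * Editor's sketch (not part of the original file): the lookup above is a plain
 * binary search over an array sorted by uLeaf.  The same narrowing logic on a
 * bare array of leaf numbers, for reference; the helper name is hypothetical.
 */
#if 0 /* illustrative only */
static bool cpumExampleLeafSearch(uint32_t const *pauLeaves, unsigned cLeaves, uint32_t uLeaf)
{
    unsigned iStart = 0;
    unsigned iEnd   = cLeaves;
    while (iEnd)
    {
        unsigned i = iStart + (iEnd - iStart) / 2U;   /* midpoint of the remaining range */
        if (uLeaf < pauLeaves[i])
        {
            if (i <= iStart)
                return false;                         /* range exhausted on the left */
            iEnd = i;
        }
        else if (uLeaf > pauLeaves[i])
        {
            i += 1;
            if (i >= iEnd)
                return false;                         /* range exhausted on the right */
            iStart = i;
        }
        else
            return true;                              /* exact hit */
    }
    return false;
}
#endif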
1204
1205
1206/**
1207 * Looks up a CPUID leaf in the CPUID leaf array.
1208 *
1209 * @returns Pointer to the leaf if found, NULL if not.
1210 *
1211 * @param pVM The cross context VM structure.
1212 * @param uLeaf The leaf to get.
1213 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1214 * isn't.
1215 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1216 */
1217PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1218{
1219 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1220 if (iEnd)
1221 {
1222 unsigned iStart = 0;
1223 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1224 for (;;)
1225 {
1226 unsigned i = iStart + (iEnd - iStart) / 2U;
1227 if (uLeaf < paLeaves[i].uLeaf)
1228 {
1229 if (i <= iStart)
1230 return NULL;
1231 iEnd = i;
1232 }
1233 else if (uLeaf > paLeaves[i].uLeaf)
1234 {
1235 i += 1;
1236 if (i >= iEnd)
1237 return NULL;
1238 iStart = i;
1239 }
1240 else
1241 {
1242 uSubLeaf &= paLeaves[i].fSubLeafMask;
1243 if (uSubLeaf == paLeaves[i].uSubLeaf)
1244 *pfExactSubLeafHit = true;
1245 else
1246 {
1247 /* Find the right subleaf. We return the last one before
1248 uSubLeaf if we don't find an exact match. */
1249 if (uSubLeaf < paLeaves[i].uSubLeaf)
1250 while ( i > 0
1251 && uLeaf == paLeaves[i - 1].uLeaf
1252 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1253 i--;
1254 else
1255 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1256 && uLeaf == paLeaves[i + 1].uLeaf
1257 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1258 i++;
1259 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1260 }
1261 return &paLeaves[i];
1262 }
1263 }
1264 }
1265
1266 *pfExactSubLeafHit = false;
1267 return NULL;
1268}
1269
1270
1271/**
1272 * Gets a CPUID leaf.
1273 *
1274 * @param pVCpu The cross context virtual CPU structure.
1275 * @param uLeaf The CPUID leaf to get.
1276 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1277 * @param pEax Where to store the EAX value.
1278 * @param pEbx Where to store the EBX value.
1279 * @param pEcx Where to store the ECX value.
1280 * @param pEdx Where to store the EDX value.
1281 */
1282VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1283 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1284{
1285 bool fExactSubLeafHit;
1286 PVM pVM = pVCpu->CTX_SUFF(pVM);
1287 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1288 if (pLeaf)
1289 {
1290 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1291 if (fExactSubLeafHit)
1292 {
1293 *pEax = pLeaf->uEax;
1294 *pEbx = pLeaf->uEbx;
1295 *pEcx = pLeaf->uEcx;
1296 *pEdx = pLeaf->uEdx;
1297
1298 /*
1299 * Deal with CPU specific information.
1300 */
1301 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
1302 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
1303 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
1304 {
1305 if (uLeaf == 1)
1306 {
1307 /* EBX: Bits 31-24: Initial APIC ID. */
1308 Assert(pVCpu->idCpu <= 255);
1309 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1310 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1311
1312 /* EDX: Bit 9: AND with APICBASE.EN. */
1313 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1314 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1315
1316 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1317 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1318 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1319 }
1320 else if (uLeaf == 0xb)
1321 {
1322 /* EDX: Initial extended APIC ID. */
1323 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1324 *pEdx = pVCpu->idCpu;
1325 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
1326 }
1327 else if (uLeaf == UINT32_C(0x8000001e))
1328 {
1329 /* EAX: Initial extended APIC ID. */
1330 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1331 *pEax = pVCpu->idCpu;
1332 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
1333 }
1334 else if (uLeaf == UINT32_C(0x80000001))
1335 {
1336 /* EDX: Bit 9: AND with APICBASE.EN. */
1337 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
1338 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1339 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
1340 }
1341 else
1342 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1343 }
1344 }
1345 /*
1346 * Out-of-range sub-leaves aren't quite as easy and pretty to emulate,
1347 * but we do the best we can here...
1348 */
1349 else
1350 {
1351 *pEax = *pEbx = *pEcx = *pEdx = 0;
1352 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1353 {
1354 *pEcx = uSubLeaf & 0xff;
1355 *pEdx = pVCpu->idCpu;
1356 }
1357 }
1358 }
1359 else
1360 {
1361 /*
1362 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1363 */
1364 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1365 {
1366 default:
1367 AssertFailed();
1368 case CPUMUNKNOWNCPUID_DEFAULTS:
1369 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1370 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1371 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1372 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1373 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1374 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1375 break;
1376 case CPUMUNKNOWNCPUID_PASSTHRU:
1377 *pEax = uLeaf;
1378 *pEbx = 0;
1379 *pEcx = uSubLeaf;
1380 *pEdx = 0;
1381 break;
1382 }
1383 }
1384 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1385}
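/*
 * Editor's sketch (not part of the original file): reading the initial APIC ID
 * the way a guest would, via CPUID leaf 1.  Bits 31:24 of EBX are overridden
 * with the virtual CPU id by the code above.  The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static uint8_t cpumExampleInitialApicId(PVMCPU pVCpu)
{
    uint32_t uEax, uEbx, uEcx, uEdx;
    CPUMGetGuestCpuId(pVCpu, 1 /*uLeaf*/, 0 /*uSubLeaf*/, &uEax, &uEbx, &uEcx, &uEdx);
    return (uint8_t)(uEbx >> 24);   /* equals pVCpu->idCpu, per the override above */
}
#endif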
1386
1387
1388/**
1389 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1390 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1391 *
1392 * @returns Previous value.
1393 * @param pVCpu The cross context virtual CPU structure to make the
1394 * change on. Usually the calling EMT.
1395 * @param fVisible Whether to make it visible (true) or hide it (false).
1396 *
1397 * @remarks This is "VMMDECL" so that it still links with
1398 * the old APIC code which is in VBoxDD2 and not in
1399 * the VMM module.
1400 */
1401VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1402{
1403 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1404 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1405
1406#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1407 /*
1408 * Patch manager saved state legacy pain.
1409 */
1410 PVM pVM = pVCpu->CTX_SUFF(pVM);
1411 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1412 if (pLeaf)
1413 {
1414 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1415 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx;
1416 else
1417 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx & ~X86_CPUID_FEATURE_EDX_APIC;
1418 }
1419
1420 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1421 if (pLeaf)
1422 {
1423 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1424 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx;
1425 else
1426 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx & ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1427 }
1428#endif
1429
1430 return fOld;
1431}
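/*
 * Editor's sketch (not part of the original file): hiding the APIC feature and
 * observing the effect through CPUMGetGuestCpuId (leaf 1, EDX bit 9).  Assumes
 * the guest CPUID profile flags leaf 1 as containing the APIC bit; the helper
 * name is hypothetical.
 */
#if 0 /* illustrative only */
static void cpumExampleHideApicFeature(PVMCPU pVCpu)
{
    bool const fWasVisible = CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, false /*fVisible*/);
    uint32_t uEax, uEbx, uEcx, uEdx;
    CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
    Assert(!(uEdx & X86_CPUID_FEATURE_EDX_APIC));   /* assumes leaf 1 carries the APIC flag */
    CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, fWasVisible);   /* restore the previous state */
}
#endif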
1432
1433
1434/**
1435 * Gets the host CPU vendor.
1436 *
1437 * @returns CPU vendor.
1438 * @param pVM The cross context VM structure.
1439 */
1440VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1441{
1442 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1443}
1444
1445
1446/**
1447 * Gets the CPU vendor.
1448 *
1449 * @returns CPU vendor.
1450 * @param pVM The cross context VM structure.
1451 */
1452VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1453{
1454 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1455}
1456
1457
1458VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1459{
1460 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1461 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1462}
1463
1464
1465VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1466{
1467 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1468 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1469}
1470
1471
1472VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1473{
1474 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1475 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1476}
1477
1478
1479VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1480{
1481 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1482 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1483}
1484
1485
1486VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1487{
1488 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1489 return VINF_SUCCESS; /* No need to recalc. */
1490}
1491
1492
1493VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1494{
1495 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1496 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1497}
1498
1499
1500VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1501{
1502 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1503 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1504 if (iReg == 4 || iReg == 5)
1505 iReg += 2;
1506 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1507 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1508}
1509
1510
1511/**
1512 * Recalculates the hypervisor DRx register values based on current guest
1513 * registers and DBGF breakpoints, updating changed registers depending on the
1514 * context.
1515 *
1516 * This is called whenever a guest DRx register is modified (any context) and
1517 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1518 *
1519 * In raw-mode context this function will reload any (hyper) DRx registers that
1520 * come out with a different value. It may also have to save the host debug
1521 * registers if that hasn't been done already. In this context though, we'll
1522 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1523 * are only important when breakpoints are actually enabled.
1524 *
1525 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1526 * reloaded by the HM code if it changes. Furthermore, we will only use the
1527 * combined register set when the VBox debugger is actually using hardware BPs;
1528 * when it isn't, we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1529 * concern us here).
1530 *
1531 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1532 * all the time.
1533 *
1534 * @returns VINF_SUCCESS.
1535 * @param pVCpu The cross context virtual CPU structure.
1536 * @param iGstReg The guest debug register number that was modified.
1537 * UINT8_MAX if not guest register.
1538 * @param fForceHyper Used in HM to force hyper registers because of single
1539 * stepping.
1540 */
1541VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1542{
1543 PVM pVM = pVCpu->CTX_SUFF(pVM);
1544#ifndef IN_RING0
1545 RT_NOREF_PV(iGstReg);
1546#endif
1547
1548 /*
1549 * Compare the DR7s first.
1550 *
1551 * We only care about the enabled flags. GD is virtualized when we
1552 * dispatch the #DB, we never enable it. The DBGF DR7 value will
1553 * always have the LE and GE bits set, so no need to check and disable
1554 * stuff if they're cleared like we have to for the guest DR7.
1555 */
1556 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1557 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1558 uGstDr7 = 0;
1559 else if (!(uGstDr7 & X86_DR7_LE))
1560 uGstDr7 &= ~X86_DR7_LE_ALL;
1561 else if (!(uGstDr7 & X86_DR7_GE))
1562 uGstDr7 &= ~X86_DR7_GE_ALL;
1563
1564 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1565
1566#ifdef IN_RING0
1567 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1568 fForceHyper = true;
1569#endif
1570 if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
1571 {
1572 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1573#ifdef IN_RC
1574 bool const fHmEnabled = false;
1575#elif defined(IN_RING3)
1576 bool const fHmEnabled = HMIsEnabled(pVM);
1577#endif
1578
1579 /*
1580 * Ok, something is enabled. Recalc each of the breakpoints, taking
1581 * the VM debugger ones over the guest ones. In raw-mode context we will
1582 * not allow breakpoints with values inside the hypervisor area.
1583 */
1584 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1585
1586 /* bp 0 */
1587 RTGCUINTREG uNewDr0;
1588 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1589 {
1590 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1591 uNewDr0 = DBGFBpGetDR0(pVM);
1592 }
1593 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1594 {
1595 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1596#ifndef IN_RING0
1597 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1598 uNewDr0 = 0;
1599 else
1600#endif
1601 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1602 }
1603 else
1604 uNewDr0 = 0;
1605
1606 /* bp 1 */
1607 RTGCUINTREG uNewDr1;
1608 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1609 {
1610 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1611 uNewDr1 = DBGFBpGetDR1(pVM);
1612 }
1613 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1614 {
1615 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1616#ifndef IN_RING0
1617 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1618 uNewDr1 = 0;
1619 else
1620#endif
1621 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1622 }
1623 else
1624 uNewDr1 = 0;
1625
1626 /* bp 2 */
1627 RTGCUINTREG uNewDr2;
1628 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1629 {
1630 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1631 uNewDr2 = DBGFBpGetDR2(pVM);
1632 }
1633 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1634 {
1635 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1636#ifndef IN_RING0
1637 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1638 uNewDr2 = 0;
1639 else
1640#endif
1641 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1642 }
1643 else
1644 uNewDr2 = 0;
1645
1646 /* bp 3 */
1647 RTGCUINTREG uNewDr3;
1648 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1649 {
1650 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1651 uNewDr3 = DBGFBpGetDR3(pVM);
1652 }
1653 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1654 {
1655 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1656#ifndef IN_RING0
1657 if (!fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1658 uNewDr3 = 0;
1659 else
1660#endif
1661 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1662 }
1663 else
1664 uNewDr3 = 0;
1665
1666 /*
1667 * Apply the updates.
1668 */
1669#ifdef IN_RC
1670 /* Make sure to save host registers first. */
1671 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1672 {
1673 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1674 {
1675 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1676 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1677 }
1678 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1679 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1680 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1681 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1682 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1683
1684 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1685 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1686 ASMSetDR0(uNewDr0);
1687 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
1688 ASMSetDR1(uNewDr1);
1689 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
1690 ASMSetDR2(uNewDr2);
1691 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
1692 ASMSetDR3(uNewDr3);
1693 ASMSetDR6(X86_DR6_INIT_VAL);
1694 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
1695 ASMSetDR7(uNewDr7);
1696 }
1697 else
1698#endif
1699 {
1700 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1701 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1702 CPUMSetHyperDR3(pVCpu, uNewDr3);
1703 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1704 CPUMSetHyperDR2(pVCpu, uNewDr2);
1705 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1706 CPUMSetHyperDR1(pVCpu, uNewDr1);
1707 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1708 CPUMSetHyperDR0(pVCpu, uNewDr0);
1709 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1710 CPUMSetHyperDR7(pVCpu, uNewDr7);
1711 }
1712 }
1713#ifdef IN_RING0
1714 else if (CPUMIsGuestDebugStateActive(pVCpu))
1715 {
1716 /*
1717 * Reload the register that was modified. Normally this won't happen
1718 * as we won't intercept DRx writes when not having the hyper debug
1719 * state loaded, but in case we do for some reason we'll simply deal
1720 * with it.
1721 */
1722 switch (iGstReg)
1723 {
1724 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1725 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1726 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1727 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1728 default:
1729 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1730 }
1731 }
1732#endif
1733 else
1734 {
1735 /*
1736 * No active debug state any more. In raw-mode this means we have to
1737 * make sure DR7 has everything disabled now, if we armed it already.
1738 * In ring-0 we might end up here when just single stepping.
1739 */
1740#if defined(IN_RC) || defined(IN_RING0)
1741 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1742 {
1743# ifdef IN_RC
1744 ASMSetDR7(X86_DR7_INIT_VAL);
1745# endif
1746 if (pVCpu->cpum.s.Hyper.dr[0])
1747 ASMSetDR0(0);
1748 if (pVCpu->cpum.s.Hyper.dr[1])
1749 ASMSetDR1(0);
1750 if (pVCpu->cpum.s.Hyper.dr[2])
1751 ASMSetDR2(0);
1752 if (pVCpu->cpum.s.Hyper.dr[3])
1753 ASMSetDR3(0);
1754 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1755 }
1756#endif
1757 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1758
1759 /* Clear all the registers. */
1760 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1761 pVCpu->cpum.s.Hyper.dr[3] = 0;
1762 pVCpu->cpum.s.Hyper.dr[2] = 0;
1763 pVCpu->cpum.s.Hyper.dr[1] = 0;
1764 pVCpu->cpum.s.Hyper.dr[0] = 0;
1765
1766 }
1767 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1768 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1769 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1770 pVCpu->cpum.s.Hyper.dr[7]));
1771
1772 return VINF_SUCCESS;
1773}
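/*
 * Editor's illustration (not part of the original file): the guest DR7
 * sanitizing at the top of CPUMRecalcHyperDRx drops the local enables when
 * DR7.LE is clear and the global enables when DR7.GE is clear, so a guest DR7
 * of L0|GE ends up with no breakpoints enabled.  The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static void cpumExampleGuestDr7Gating(void)
{
    RTGCUINTREG uGstDr7 = X86_DR7_L0 | X86_DR7_GE;   /* L0 set, but LE clear */
    if (!(uGstDr7 & X86_DR7_LE))
        uGstDr7 &= ~X86_DR7_LE_ALL;                  /* mirrors the else-if branch above */
    Assert(!(uGstDr7 & X86_DR7_ENABLED_MASK));       /* breakpoint 0 is treated as disabled */
}
#endif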
1774
1775
1776/**
1777 * Set the guest XCR0 register.
1778 *
1779 * Will load additional state if the FPU state is already loaded (in ring-0 &
1780 * raw-mode context).
1781 *
1782 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1783 * value.
1784 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1785 * @param uNewValue The new value.
1786 * @thread EMT(pVCpu)
1787 */
1788VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
1789{
1790 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1791 /* The X87 bit cannot be cleared. */
1792 && (uNewValue & XSAVE_C_X87)
1793 /* AVX requires SSE. */
1794 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1795 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1796 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1797 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1798 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1799 )
1800 {
1801 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1802
1803 /* If more state components are enabled, we need to take care to load
1804 them if the FPU/SSE state is already loaded. May otherwise leak
1805 host state to the guest. */
1806 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1807 if (fNewComponents)
1808 {
1809#if defined(IN_RING0) || defined(IN_RC)
1810 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1811 {
1812 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1813 /* Adding more components. */
1814 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
1815 else
1816 {
1817 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1818 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1819 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1820 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1821 }
1822 }
1823#endif
1824 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1825 }
1826 return VINF_SUCCESS;
1827 }
1828 return VERR_CPUM_RAISE_GP_0;
1829}
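/*
 * Editor's illustration (not part of the original file): which XCR0 values the
 * validation above rejects unconditionally.  Accepting a value additionally
 * requires fXStateGuestMask to permit the requested bits; the helper name is
 * hypothetical.
 */
#if 0 /* illustrative only */
static void cpumExampleXcr0Validation(PVMCPU pVCpu)
{
    /* AVX (YMM) without SSE is rejected regardless of the supported mask... */
    int rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_YMM);
    Assert(rc == VERR_CPUM_RAISE_GP_0);
    /* ...and so is any value with the mandatory x87 bit clear. */
    rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_SSE);
    Assert(rc == VERR_CPUM_RAISE_GP_0);
    RT_NOREF_PV(rc);
}
#endif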
1830
1831
1832/**
1833 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1834 *
1835 * @returns true if NXE is enabled, otherwise false.
1836 * @param pVCpu The cross context virtual CPU structure.
1837 */
1838VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
1839{
1840 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1841}
1842
1843
1844/**
1845 * Tests if the guest has the Page Size Extension enabled (PSE).
1846 *
1847 * @returns true if PSE or PAE is enabled (large pages available), otherwise false.
1848 * @param pVCpu The cross context virtual CPU structure.
1849 */
1850VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
1851{
1852 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1853 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1854}
1855
1856
1857/**
1858 * Tests if the guest has the paging enabled (PG).
1859 *
1860 * @returns true if paging is enabled, otherwise false.
1861 * @param pVCpu The cross context virtual CPU structure.
1862 */
1863VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
1864{
1865 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1866}
1867
1868
1869/**
1870 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
1871 *
1872 * @returns true if CR0.WP is set, otherwise false.
1873 * @param pVCpu The cross context virtual CPU structure.
1874 */
1875VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
1876{
1877 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1878}
1879
1880
1881/**
1882 * Tests if the guest is running in real mode or not.
1883 *
1884 * @returns true if in real mode, otherwise false.
1885 * @param pVCpu The cross context virtual CPU structure.
1886 */
1887VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
1888{
1889 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1890}
1891
1892
1893/**
1894 * Tests if the guest is running in real or virtual 8086 mode.
1895 *
1896 * @returns @c true if it is, @c false if not.
1897 * @param pVCpu The cross context virtual CPU structure.
1898 */
1899VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
1900{
1901 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1902 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1903}
1904
1905
1906/**
1907 * Tests if the guest is running in protected mode or not.
1908 *
1909 * @returns true if in protected mode, otherwise false.
1910 * @param pVCpu The cross context virtual CPU structure.
1911 */
1912VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
1913{
1914 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1915}
1916
1917
1918/**
1919 * Tests if the guest is running in paged protected mode or not.
1920 *
1921 * @returns true if in paged protected mode, otherwise false.
1922 * @param pVCpu The cross context virtual CPU structure.
1923 */
1924VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
1925{
1926 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1927}
1928
1929
1930/**
1931 * Tests if the guest is running in long mode or not.
1932 *
1933 * @returns true if in long mode, otherwise false.
1934 * @param pVCpu The cross context virtual CPU structure.
1935 */
1936VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
1937{
1938 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1939}
1940
1941
1942/**
1943 * Tests if the guest is running in PAE mode or not.
1944 *
1945 * @returns true if in PAE mode, otherwise false.
1946 * @param pVCpu The cross context virtual CPU structure.
1947 */
1948VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
1949{
1950 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1951 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1952 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1953 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1954 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1955}
1956
1957
1958/**
1959 * Tests if the guest is running in 64 bits mode or not.
1960 *
1961 * @returns true if in 64 bits protected mode, otherwise false.
1962 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1963 */
1964VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1965{
1966 if (!CPUMIsGuestInLongMode(pVCpu))
1967 return false;
1968 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1969 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1970}
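/*
 * Editor's illustration (not part of the original file): long mode (EFER.LMA)
 * alone is not enough for 64-bit code; the current CS descriptor must also have
 * its L bit set, otherwise the guest is executing in compatibility mode.  The
 * helper name is hypothetical.
 */
#if 0 /* illustrative only */
static bool cpumExampleIsInCompatMode(PVMCPU pVCpu)
{
    return CPUMIsGuestInLongMode(pVCpu)
        && !CPUMIsGuestIn64BitCode(pVCpu);
}
#endif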
1971
1972
1973/**
1974 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
1975 * registers.
1976 *
1977 * @returns true if in 64 bits protected mode, otherwise false.
1978 * @param pCtx Pointer to the current guest CPU context.
1979 */
1980VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
1981{
1982 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
1983}
1984
1985#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1986
1987/**
1988 * Tests whether we have entered raw-mode (ring compression active).
1989 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
1990 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
1991 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1992 */
1993VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
1994{
1995 return pVCpu->cpum.s.fRawEntered;
1996}
1997
1998/**
1999 * Transforms the guest CPU state to raw-ring mode.
2000 *
2001 * This function will change any of the cs and ss selectors with RPL=0 to RPL=1.
2002 *
2003 * @returns VBox status code. (recompiler failure)
2004 * @param pVCpu The cross context virtual CPU structure.
2005 * @see @ref pg_raw
2006 */
2007VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2008{
2009 PVM pVM = pVCpu->CTX_SUFF(pVM);
2010
2011 Assert(!pVCpu->cpum.s.fRawEntered);
2012 Assert(!pVCpu->cpum.s.fRemEntered);
2013 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2014
2015 /*
2016 * Are we in Ring-0?
2017 */
2018 if ( pCtx->ss.Sel
2019 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2020 && !pCtx->eflags.Bits.u1VM)
2021 {
2022 /*
2023 * Enter execution mode.
2024 */
2025 PATMRawEnter(pVM, pCtx);
2026
2027 /*
2028 * Set CPL to Ring-1.
2029 */
2030 pCtx->ss.Sel |= 1;
2031 if ( pCtx->cs.Sel
2032 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2033 pCtx->cs.Sel |= 1;
2034 }
2035 else
2036 {
2037# ifdef VBOX_WITH_RAW_RING1
2038 if ( EMIsRawRing1Enabled(pVM)
2039 && !pCtx->eflags.Bits.u1VM
2040 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2041 {
2042 /* Set CPL to Ring-2. */
2043 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2044 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2045 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2046 }
2047# else
2048 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2049 ("ring-1 code not supported\n"));
2050# endif
2051 /*
2052 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2053 */
2054 PATMRawEnter(pVM, pCtx);
2055 }
2056
2057 /*
2058 * Assert sanity.
2059 */
2060 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2061 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2062 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2063 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE));
2064
2065 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2066
2067 pVCpu->cpum.s.fRawEntered = true;
2068 return VINF_SUCCESS;
2069}
2070
2071
2072/**
2073 * Transforms the guest CPU state from raw-ring mode to correct values.
2074 *
2075 * This function will change any selector registers with RPL=1 back to RPL=0.
2076 *
2077 * @returns Adjusted rc.
2078 * @param pVCpu The cross context virtual CPU structure.
2079 * @param rc Raw mode return code
2080 * @see @ref pg_raw
2081 */
2082VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2083{
2084 PVM pVM = pVCpu->CTX_SUFF(pVM);
2085
2086 /*
2087 * Don't leave if we've already left (in RC).
2088 */
2089 Assert(!pVCpu->cpum.s.fRemEntered);
2090 if (!pVCpu->cpum.s.fRawEntered)
2091 return rc;
2092 pVCpu->cpum.s.fRawEntered = false;
2093
2094 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2095 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2096 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2097 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2098
2099 /*
2100 * Are we executing in raw ring-1?
2101 */
2102 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2103 && !pCtx->eflags.Bits.u1VM)
2104 {
2105 /*
2106 * Leave execution mode.
2107 */
2108 PATMRawLeave(pVM, pCtx, rc);
2109 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2110 /** @todo See what happens if we remove this. */
2111 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2112 pCtx->ds.Sel &= ~X86_SEL_RPL;
2113 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2114 pCtx->es.Sel &= ~X86_SEL_RPL;
2115 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2116 pCtx->fs.Sel &= ~X86_SEL_RPL;
2117 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2118 pCtx->gs.Sel &= ~X86_SEL_RPL;
2119
2120 /*
2121 * Ring-1 selector => Ring-0.
2122 */
2123 pCtx->ss.Sel &= ~X86_SEL_RPL;
2124 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2125 pCtx->cs.Sel &= ~X86_SEL_RPL;
2126 }
2127 else
2128 {
2129 /*
2130 * PATM is taking care of the IOPL and IF flags for us.
2131 */
2132 PATMRawLeave(pVM, pCtx, rc);
2133 if (!pCtx->eflags.Bits.u1VM)
2134 {
2135# ifdef VBOX_WITH_RAW_RING1
2136 if ( EMIsRawRing1Enabled(pVM)
2137 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2138 {
2139 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2140 /** @todo See what happens if we remove this. */
2141 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2142 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2143 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2144 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2145 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2146 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2147 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2148 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2149
2150 /*
2151 * Ring-2 selector => Ring-1.
2152 */
2153 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2154 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2155 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2156 }
2157 else
2158 {
2159# endif
2160 /** @todo See what happens if we remove this. */
2161 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2162 pCtx->ds.Sel &= ~X86_SEL_RPL;
2163 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2164 pCtx->es.Sel &= ~X86_SEL_RPL;
2165 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2166 pCtx->fs.Sel &= ~X86_SEL_RPL;
2167 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2168 pCtx->gs.Sel &= ~X86_SEL_RPL;
2169# ifdef VBOX_WITH_RAW_RING1
2170 }
2171# endif
2172 }
2173 }
2174
2175 return rc;
2176}
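
/*
 * Illustrative example of the enter/leave pair above (selector values made up):
 * a guest executing ring-0 code with CS=0x0008 and SS=0x0010 gets RPL=1 forced
 * in by CPUMRawEnter(), so raw-mode execution actually runs with CS=0x0009 and
 * SS=0x0011; CPUMRawLeave() masks the RPL off again so the rest of the VMM
 * sees the original ring-0 selectors.
 */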
2177
2178#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2179
2180/**
2181 * Updates the EFLAGS while we're in raw-mode.
2182 *
2183 * @param pVCpu The cross context virtual CPU structure.
2184 * @param fEfl The new EFLAGS value.
2185 */
2186VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2187{
2188#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2189 if (pVCpu->cpum.s.fRawEntered)
2190 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2191 else
2192#endif
2193 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2194}
2195
2196
2197/**
2198 * Gets the EFLAGS while we're in raw-mode.
2199 *
2200 * @returns The eflags.
2201 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2202 */
2203VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2204{
2205#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2206 if (pVCpu->cpum.s.fRawEntered)
2207 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2208#endif
2209 return pVCpu->cpum.s.Guest.eflags.u32;
2210}
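
/*
 * Illustrative usage of the two wrappers above (hypothetical caller): while
 * fRawEntered is set, EFLAGS accesses should go through these wrappers so that
 * PATM can keep its virtualized IF/IOPL consistent.
 *
 * @code
 *      uint32_t fEfl = CPUMRawGetEFlags(pVCpu);
 *      fEfl |= X86_EFL_TF;                     // example modification only
 *      CPUMRawSetEFlags(pVCpu, fEfl);
 * @endcode
 */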
2211
2212
2213/**
2214 * Sets the specified changed flags (CPUM_CHANGED_*).
2215 *
2216 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2217 * @param fChangedAdd The changed flags to add.
2218 */
2219VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2220{
2221 pVCpu->cpum.s.fChanged |= fChangedAdd;
2222}
2223
2224
2225/**
2226 * Checks if the host CPU supports the XSAVE and XRSTOR instructions.
2227 *
2228 * @returns true if supported.
2229 * @returns false if not supported.
2230 * @param pVM The cross context VM structure.
2231 */
2232VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2233{
2234 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2235}
2236
2237
2238/**
2239 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2240 * @returns true if used.
2241 * @returns false if not used.
2242 * @param pVM The cross context VM structure.
2243 */
2244VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2245{
2246 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2247}
2248
2249
2250/**
2251 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2252 * @returns true if used.
2253 * @returns false if not used.
2254 * @param pVM The cross context VM structure.
2255 */
2256VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2257{
2258 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2259}
2260
2261#ifdef IN_RC
2262
2263/**
2264 * Lazily sync in the FPU/XMM state.
2265 *
2266 * @returns VBox status code.
2267 * @param pVCpu The cross context virtual CPU structure.
2268 */
2269VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2270{
2271 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2272}
2273
2274#endif /* IN_RC */
2275
2276/**
2277 * Checks if we activated the FPU/XMM state of the guest OS.
2278 *
2279 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
2280 * time we'll be executing guest code, so it may return true for 64-on-32 when
2281 * we still haven't actually loaded the FPU state, just scheduled it to be
2282 * loaded the next time we go through the world switcher (CPUM_SYNC_FPU_STATE).
2283 *
2284 * @returns true / false.
2285 * @param pVCpu The cross context virtual CPU structure.
2286 */
2287VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2288{
2289 return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
2290}
2291
2292
2293/**
2294 * Checks if we've really loaded the FPU/XMM state of the guest OS.
2295 *
2296 * @returns true / false.
2297 * @param pVCpu The cross context virtual CPU structure.
2298 */
2299VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
2300{
2301 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
2302}
2303
2304
2305/**
2306 * Checks if we saved the FPU/XMM state of the host OS.
2307 *
2308 * @returns true / false.
2309 * @param pVCpu The cross context virtual CPU structure.
2310 */
2311VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
2312{
2313 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
2314}
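
/*
 * Rough relationship between the three FPU state queries above (illustrative
 * summary only): CPUMIsGuestFPUStateLoaded() checks CPUM_USED_FPU_GUEST alone,
 * CPUMIsGuestFPUStateActive() additionally accepts a pending
 * CPUM_SYNC_FPU_STATE (the 64-on-32 world switcher case), and
 * CPUMIsHostFPUStateSaved() tracks CPUM_USED_FPU_HOST independently of both.
 */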
2315
2316
2317/**
2318 * Checks if the guest debug state is active.
2319 *
2320 * @returns boolean
2321 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2322 */
2323VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2324{
2325 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2326}
2327
2328
2329/**
2330 * Checks if the guest debug state is to be made active during the world-switch
2331 * (currently only used for the 32->64 switcher case).
2332 *
2333 * @returns boolean
2334 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2335 */
2336VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2337{
2338 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2339}
2340
2341
2342/**
2343 * Checks if the hyper debug state is active.
2344 *
2345 * @returns boolean
2346 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2347 */
2348VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2349{
2350 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2351}
2352
2353
2354/**
2355 * Checks if the hyper debug state is to be made active during the world-switch
2356 * (currently only used for the 32->64 switcher case).
2357 *
2358 * @returns boolean
2359 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2360 */
2361VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2362{
2363 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2364}
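
/*
 * Rough relationship between the four debug state queries above (illustrative
 * summary only): the CPUM_USED_DEBUG_REGS_GUEST/HYPER flags mean the
 * corresponding DR0-DR7 values are loaded on the CPU right now, while the
 * CPUM_SYNC_DEBUG_REGS_GUEST/HYPER flags mean the load is merely scheduled for
 * the next 32->64 world switch.
 */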
2365
2366
2367/**
2368 * Mark the guest's debug state as inactive.
2369 *
2371 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2372 * @todo This API doesn't make sense any more.
2373 */
2374VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2375{
2376 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2377 NOREF(pVCpu);
2378}
2379
2380
2381/**
2382 * Get the current privilege level of the guest.
2383 *
2384 * @returns CPL
2385 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2386 */
2387VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2388{
2389 /*
2390 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2391 *
2392 * Note! We used to check CS.DPL here, assuming it was always equal to
2393 * CPL even if a conforming segment was loaded. But this turned out to
2394 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2395 * during install after a far call to ring 2 with VT-x. Then on newer
2396 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2397 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2398 *
2399 * So, forget CS.DPL, always use SS.DPL.
2400 *
2401 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2402 * isn't necessarily equal if the segment is conforming.
2403 * See section 4.11.1 in the AMD manual.
2404 *
2405 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2406 * right after real->prot mode switch and when in V8086 mode? That
2407 * section says the RPL specified in a direct transfer (call, jmp,
2408 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2409 * it would be impossible for an exception handler or the iret
2410 * instruction to figure out whether SS:ESP are part of the frame
2411 * or not. A VBox or qemu bug must've led to this misconception.
2412 *
2413 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2414 * selector into SS with an RPL other than the CPL when CPL != 3 and
2415 * we're in 64-bit mode. The Intel dev box doesn't allow this; it
2416 * insists on RPL = CPL. Weird.
2417 */
2418 uint32_t uCpl;
2419 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2420 {
2421 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2422 {
2423 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2424 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2425 else
2426 {
2427 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2428#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2429# ifdef VBOX_WITH_RAW_RING1
2430 if (pVCpu->cpum.s.fRawEntered)
2431 {
2432 if ( uCpl == 2
2433 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2434 uCpl = 1;
2435 else if (uCpl == 1)
2436 uCpl = 0;
2437 }
2438 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2439# else
2440 if (uCpl == 1)
2441 uCpl = 0;
2442# endif
2443#endif
2444 }
2445 }
2446 else
2447 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2448 }
2449 else
2450 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2451 return uCpl;
2452}
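
/*
 * Illustrative usage (hypothetical caller and status code): privilege checks
 * elsewhere typically reduce to comparing the returned CPL, e.g. refusing a
 * privileged operation outside ring-0:
 *
 * @code
 *      if (CPUMGetGuestCPL(pVCpu) != 0)
 *          return VERR_ACCESS_DENIED;  // example status code only
 * @endcode
 */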
2453
2454
2455/**
2456 * Gets the current guest CPU mode.
2457 *
2458 * If paging mode is what you need, check out PGMGetGuestMode().
2459 *
2460 * @returns The CPU mode.
2461 * @param pVCpu The cross context virtual CPU structure.
2462 */
2463VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2464{
2465 CPUMMODE enmMode;
2466 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2467 enmMode = CPUMMODE_REAL;
2468 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2469 enmMode = CPUMMODE_PROTECTED;
2470 else
2471 enmMode = CPUMMODE_LONG;
2472
2473 return enmMode;
2474}
2475
2476
2477/**
2478 * Figures out whether the CPU is currently executing 16, 32 or 64 bit code.
2479 *
2480 * @returns 16, 32 or 64.
2481 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2482 */
2483VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2484{
2485 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2486 return 16;
2487
2488 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2489 {
2490 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2491 return 16;
2492 }
2493
2494 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2495 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2496 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2497 return 64;
2498
2499 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2500 return 32;
2501
2502 return 16;
2503}
2504
2505
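/**
 * Gets the guest CPU mode for use with the disassembler.
 *
 * @returns DISCPUMODE_16BIT, DISCPUMODE_32BIT or DISCPUMODE_64BIT.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */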
2506VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2507{
2508 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2509 return DISCPUMODE_16BIT;
2510
2511 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2512 {
2513 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2514 return DISCPUMODE_16BIT;
2515 }
2516
2517 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2518 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2519 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2520 return DISCPUMODE_64BIT;
2521
2522 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2523 return DISCPUMODE_32BIT;
2524
2525 return DISCPUMODE_16BIT;
2526}
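
/*
 * Illustrative sketch (hypothetical helper): CPUMGetGuestCodeBits and
 * CPUMGetGuestDisMode walk the same CR0.PE / EFLAGS.VM / CS.L / CS.D decision
 * tree, so their results always correspond as follows:
 *
 * @code
 *      static DISCPUMODE cpumExampleBitsToDisMode(uint32_t cBits)
 *      {
 *          switch (cBits)
 *          {
 *              case 64:    return DISCPUMODE_64BIT;
 *              case 32:    return DISCPUMODE_32BIT;
 *              default:    return DISCPUMODE_16BIT;
 *          }
 *      }
 * @endcode
 */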
2527