VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@79202

Last change on this file since 79202 was 79202, checked in by vboxsync, 5 years ago

VMM: Nested VMX: bugref:9180 VMCS shadowing, work in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 118.1 KB
1/* $Id: CPUMAllRegs.cpp 79202 2019-06-18 09:13:29Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/apic.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#ifndef IN_RC
31# include <VBox/vmm/nem.h>
32# include <VBox/vmm/hm.h>
33#endif
34#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
35# include <VBox/vmm/selm.h>
36#endif
37#include "CPUMInternal.h"
38#include <VBox/vmm/vm.h>
39#include <VBox/err.h>
40#include <VBox/dis.h>
41#include <VBox/log.h>
42#include <VBox/vmm/hm.h>
43#include <VBox/vmm/tm.h>
44#include <iprt/assert.h>
45#include <iprt/asm.h>
46#include <iprt/asm-amd64-x86.h>
47#ifdef IN_RING3
48# include <iprt/thread.h>
49#endif
50
51/** Disable stack frame pointer generation here. */
52#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
53# pragma optimize("y", off)
54#endif
55
56AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
57AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
58
59
60/*********************************************************************************************************************************
61* Defined Constants And Macros *
62*********************************************************************************************************************************/
63/**
64 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
65 *
66 * @returns Pointer to the Virtual CPU.
67 * @param a_pGuestCtx Pointer to the guest context.
68 */
69#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
70
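/*
 * Sketch of what CPUM_GUEST_CTX_TO_VMCPU amounts to: RT_FROM_MEMBER is a
 * container_of style construct that recovers the containing structure by
 * subtracting the member offset from the member pointer.  Roughly equivalent
 * to the following (illustration only, not part of the build):
 */
#if 0
    PVMCPU pVCpu = (PVMCPU)((uintptr_t)pGuestCtx - RT_UOFFSETOF(VMCPU, cpum.s.Guest));
#endif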
71/**
72 * Lazily loads the hidden parts of a selector register when using raw-mode.
73 */
74#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
75# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
76 do \
77 { \
78 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
79 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
80 } while (0)
81#else
82# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
83 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
84#endif
85
86/** @def CPUM_INT_ASSERT_NOT_EXTRN
87 * Macro for asserting that the state referred to by @a a_fNotExtrn is present (not marked external).
88 *
89 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
90 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
91 */
92#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
93 AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
94 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
95
96
97
98
99#ifdef VBOX_WITH_RAW_MODE_NOT_R0
100
101/**
102 * Does the lazy hidden selector register loading.
103 *
104 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
105 * @param pSReg The selector register to lazily load hidden parts of.
106 */
107static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
108{
109 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
110 Assert(VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)));
111 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
112
113 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
114 {
115 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
116 pSReg->Attr.u = 0;
117 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
118 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
119 pSReg->Attr.n.u2Dpl = 3;
120 pSReg->Attr.n.u1Present = 1;
121 pSReg->u32Limit = 0x0000ffff;
122 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
123 pSReg->ValidSel = pSReg->Sel;
124 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
125 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
126 }
127 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
128 {
129 /* Real mode - leave the limit and flags alone here, at least for now. */
130 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
131 pSReg->ValidSel = pSReg->Sel;
132 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
133 }
134 else
135 {
136 /* Protected mode - get it from the selector descriptor tables. */
137 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
138 {
139 Assert(!CPUMIsGuestInLongMode(pVCpu));
140 pSReg->Sel = 0;
141 pSReg->u64Base = 0;
142 pSReg->u32Limit = 0;
143 pSReg->Attr.u = 0;
144 pSReg->ValidSel = 0;
145 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
146 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
147 }
148 else
149 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
150 }
151}
152
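/*
 * Illustrative sketch of the real-mode / V8086 base calculation performed
 * above: the hidden base is simply the 16-bit selector shifted left by four.
 * The helper below is hypothetical and exists only as an example.
 */
static uint64_t cpumSketchRealModeSegBase(uint16_t Sel)
{
    /* E.g. Sel=0xb800 yields base 0xb8000 (the VGA text segment). */
    return (uint64_t)((uint32_t)Sel << 4);
}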
153
154/**
155 * Makes sure the hidden CS and SS selector registers are valid, loading them if
156 * necessary.
157 *
158 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
159 */
160VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
161{
162 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
163 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
164}
165
166
167/**
168 * Loads the hidden parts of a selector register.
169 *
170 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
171 * @param pSReg The selector register to lazily load hidden parts of.
172 */
173VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
174{
175 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
176}
177
178#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
179
180
181/**
182 * Obsolete.
183 *
184 * We don't support nested hypervisor context interrupts or traps. Life is much
185 * simpler when we don't. It's also slightly faster at times.
186 *
187 * @param pVCpu The cross context virtual CPU structure.
188 */
189VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
190{
191 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
192}
193
194
195/**
196 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
197 *
198 * @param pVCpu The cross context virtual CPU structure.
199 */
200VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
201{
202 return &pVCpu->cpum.s.Hyper;
203}
204
205
206VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
207{
208 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
209 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
210}
211
212
213VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
214{
215 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
216 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
217}
218
219
220VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
221{
222 pVCpu->cpum.s.Hyper.cr3 = cr3;
223
224#ifdef IN_RC
225 /* Update the current CR3. */
226 ASMSetCR3(cr3);
227#endif
228}
229
230VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
231{
232 return pVCpu->cpum.s.Hyper.cr3;
233}
234
235
236VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
237{
238 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
239}
240
241
242VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
243{
244 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
245}
246
247
248VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
249{
250 pVCpu->cpum.s.Hyper.es.Sel = SelES;
251}
252
253
254VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
255{
256 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
257}
258
259
260VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
261{
262 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
263}
264
265
266VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
267{
268 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
269}
270
271
272VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
273{
274 pVCpu->cpum.s.Hyper.esp = u32ESP;
275}
276
277
278VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
279{
280 pVCpu->cpum.s.Hyper.edx = u32EDX;
281}
282
283
284VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
285{
286 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
287 return VINF_SUCCESS;
288}
289
290
291VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
292{
293 pVCpu->cpum.s.Hyper.eip = u32EIP;
294}
295
296
297/**
298 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
299 * EFLAGS and EIP prior to resuming guest execution.
300 *
301 * All general registers not given as parameters will be set to 0. The EFLAGS
302 * register will be set to sane values for C/C++ code execution with interrupts
303 * disabled and IOPL 0.
304 *
305 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
306 * @param u32EIP The EIP value.
307 * @param u32ESP The ESP value.
308 * @param u32EAX The EAX value.
309 * @param u32EDX The EDX value.
310 */
311VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
312{
313 pVCpu->cpum.s.Hyper.eip = u32EIP;
314 pVCpu->cpum.s.Hyper.esp = u32ESP;
315 pVCpu->cpum.s.Hyper.eax = u32EAX;
316 pVCpu->cpum.s.Hyper.edx = u32EDX;
317 pVCpu->cpum.s.Hyper.ecx = 0;
318 pVCpu->cpum.s.Hyper.ebx = 0;
319 pVCpu->cpum.s.Hyper.ebp = 0;
320 pVCpu->cpum.s.Hyper.esi = 0;
321 pVCpu->cpum.s.Hyper.edi = 0;
322 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
323}
324
325
326VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
327{
328 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
329}
330
331
332VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
333{
334 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
335}
336
337
338/** @def MAYBE_LOAD_DRx
339 * Macro for updating DRx values in raw-mode and ring-0 contexts.
340 */
341#ifdef IN_RING0
342# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
343# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
344 do { \
345 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
346 a_fnLoad(a_uValue); \
347 else \
348 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
349 } while (0)
350# else
351# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
352 do { \
353 a_fnLoad(a_uValue); \
354 } while (0)
355# endif
356
357#elif defined(IN_RC)
358# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
359 do { \
360 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
361 { a_fnLoad(a_uValue); } \
362 } while (0)
363
364#else
365# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
366#endif
367
368VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
369{
370 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
371 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
372}
373
374
375VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
376{
377 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
378 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
379}
380
381
382VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
383{
384 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
385 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
386}
387
388
389VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
390{
391 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
392 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
393}
394
395
396VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
397{
398 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
399}
400
401
402VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
403{
404 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
405#ifdef IN_RC
406 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
407#endif
408}
409
410
411VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
412{
413 return pVCpu->cpum.s.Hyper.cs.Sel;
414}
415
416
417VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
418{
419 return pVCpu->cpum.s.Hyper.ds.Sel;
420}
421
422
423VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
424{
425 return pVCpu->cpum.s.Hyper.es.Sel;
426}
427
428
429VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
430{
431 return pVCpu->cpum.s.Hyper.fs.Sel;
432}
433
434
435VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
436{
437 return pVCpu->cpum.s.Hyper.gs.Sel;
438}
439
440
441VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
442{
443 return pVCpu->cpum.s.Hyper.ss.Sel;
444}
445
446
447VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
448{
449 return pVCpu->cpum.s.Hyper.eax;
450}
451
452
453VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
454{
455 return pVCpu->cpum.s.Hyper.ebx;
456}
457
458
459VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
460{
461 return pVCpu->cpum.s.Hyper.ecx;
462}
463
464
465VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
466{
467 return pVCpu->cpum.s.Hyper.edx;
468}
469
470
471VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
472{
473 return pVCpu->cpum.s.Hyper.esi;
474}
475
476
477VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
478{
479 return pVCpu->cpum.s.Hyper.edi;
480}
481
482
483VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
484{
485 return pVCpu->cpum.s.Hyper.ebp;
486}
487
488
489VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
490{
491 return pVCpu->cpum.s.Hyper.esp;
492}
493
494
495VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
496{
497 return pVCpu->cpum.s.Hyper.eflags.u32;
498}
499
500
501VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
502{
503 return pVCpu->cpum.s.Hyper.eip;
504}
505
506
507VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
508{
509 return pVCpu->cpum.s.Hyper.rip;
510}
511
512
513VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
514{
515 if (pcbLimit)
516 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
517 return pVCpu->cpum.s.Hyper.idtr.pIdt;
518}
519
520
521VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
522{
523 if (pcbLimit)
524 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
525 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
526}
527
528
529VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
530{
531 return pVCpu->cpum.s.Hyper.ldtr.Sel;
532}
533
534
535VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
536{
537 return pVCpu->cpum.s.Hyper.dr[0];
538}
539
540
541VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
542{
543 return pVCpu->cpum.s.Hyper.dr[1];
544}
545
546
547VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
548{
549 return pVCpu->cpum.s.Hyper.dr[2];
550}
551
552
553VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
554{
555 return pVCpu->cpum.s.Hyper.dr[3];
556}
557
558
559VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
560{
561 return pVCpu->cpum.s.Hyper.dr[6];
562}
563
564
565VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
566{
567 return pVCpu->cpum.s.Hyper.dr[7];
568}
569
570
571/**
572 * Gets the pointer to the internal CPUMCTXCORE structure.
573 * This is only for reading in order to save a few calls.
574 *
575 * @param pVCpu The cross context virtual CPU structure.
576 */
577VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
578{
579 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
580}
581
582
583/**
584 * Queries the pointer to the internal CPUMCTX structure.
585 *
586 * @returns The CPUMCTX pointer.
587 * @param pVCpu The cross context virtual CPU structure.
588 */
589VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
590{
591 return &pVCpu->cpum.s.Guest;
592}
593
594
595/**
596 * Queries the pointer to the internal CPUMCTXMSRS structure.
597 *
598 * This is for NEM only.
599 *
600 * @returns The CPUMCTX pointer.
601 * @param pVCpu The cross context virtual CPU structure.
602 */
603VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
604{
605 return &pVCpu->cpum.s.GuestMsrs;
606}
607
608
609VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
610{
611#ifdef VBOX_WITH_RAW_MODE_NOT_R0
612 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
613 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
614#endif
615 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
616 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
617 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
618 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
619 return VINF_SUCCESS; /* formality, consider it void. */
620}
621
622
623VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
624{
625#ifdef VBOX_WITH_RAW_MODE_NOT_R0
626 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
627 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
628#endif
629 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
630 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
631 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
632 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
633 return VINF_SUCCESS; /* formality, consider it void. */
634}
635
636
637VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
638{
639#ifdef VBOX_WITH_RAW_MODE_NOT_R0
640 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
641 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
642#endif
643 pVCpu->cpum.s.Guest.tr.Sel = tr;
644 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
645 return VINF_SUCCESS; /* formality, consider it void. */
646}
647
648
649VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
650{
651#ifdef VBOX_WITH_RAW_MODE_NOT_R0
652 if ( ( ldtr != 0
653 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
654 && VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
655 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
656#endif
657 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
658 /* The caller will set more hidden bits if it has them. */
659 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
660 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
661 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
662 return VINF_SUCCESS; /* formality, consider it void. */
663}
664
665
666/**
667 * Set the guest CR0.
668 *
669 * When called in GC, the hyper CR0 may be updated if that is
670 * required. The caller only has to take special action if AM,
671 * WP, PG or PE changes.
672 *
673 * @returns VINF_SUCCESS (consider it void).
674 * @param pVCpu The cross context virtual CPU structure.
675 * @param cr0 The new CR0 value.
676 */
677VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
678{
679#ifdef IN_RC
680 /*
681 * Check if we need to change hypervisor CR0 because
682 * of math stuff.
683 */
684 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
685 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
686 {
687 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
688 {
689 /*
690 * We haven't loaded the guest FPU state yet, so TS and MP are both set
691 * and EM should be reflecting the guest EM (it always does this).
692 */
693 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
694 {
695 uint32_t HyperCR0 = ASMGetCR0();
696 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
697 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
698 HyperCR0 &= ~X86_CR0_EM;
699 HyperCR0 |= cr0 & X86_CR0_EM;
700 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
701 ASMSetCR0(HyperCR0);
702 }
703# ifdef VBOX_STRICT
704 else
705 {
706 uint32_t HyperCR0 = ASMGetCR0();
707 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
708 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
709 }
710# endif
711 }
712 else
713 {
714 /*
715 * Already loaded the guest FPU state, so we're just mirroring
716 * the guest flags.
717 */
718 uint32_t HyperCR0 = ASMGetCR0();
719 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
720 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
721 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
722 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
723 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
724 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
725 ASMSetCR0(HyperCR0);
726 }
727 }
728#endif /* IN_RC */
729
730 /*
731 * Check for changes causing TLB flushes (for REM).
732 * The caller is responsible for calling PGM when appropriate.
733 */
734 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
735 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
736 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
737 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
738
739 /*
740 * Let PGM know if the WP goes from 0 to 1 (NetWare WP0+RO+US hack)
741 */
742 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
743 PGMCr0WpEnabled(pVCpu);
744
745 /* The ET flag is settable on a 386 and hardwired on 486+. */
746 if ( !(cr0 & X86_CR0_ET)
747 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
748 cr0 |= X86_CR0_ET;
749
750 pVCpu->cpum.s.Guest.cr0 = cr0;
751 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
752 return VINF_SUCCESS;
753}
754
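/*
 * Caller-side sketch for CPUMSetGuestCR0: toggling a CR0 bit on the EMT.  As
 * the function documentation notes, CPUM only records the change; when PE,
 * PG, WP or AM changed, the caller must additionally inform PGM and
 * re-evaluate the paging mode.  The helper name below is hypothetical.
 */
static void cpumSketchToggleGuestCr0Wp(PVMCPU pVCpu, bool fWp)
{
    uint64_t const uOldCr0 = CPUMGetGuestCR0(pVCpu);
    uint64_t const uNewCr0 = fWp ? uOldCr0 | X86_CR0_WP : uOldCr0 & ~(uint64_t)X86_CR0_WP;
    CPUMSetGuestCR0(pVCpu, uNewCr0);
    /* If (uOldCr0 ^ uNewCr0) touches X86_CR0_PE, X86_CR0_PG, X86_CR0_WP or
       X86_CR0_AM, the PGM/paging-mode follow-up is the caller's job. */
}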
755
756VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
757{
758 pVCpu->cpum.s.Guest.cr2 = cr2;
759 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
760 return VINF_SUCCESS;
761}
762
763
764VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
765{
766 pVCpu->cpum.s.Guest.cr3 = cr3;
767 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
768 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
769 return VINF_SUCCESS;
770}
771
772
773VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
774{
775 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
776
777 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
778 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
779 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
780
781 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
782 pVCpu->cpum.s.Guest.cr4 = cr4;
783 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
784 return VINF_SUCCESS;
785}
786
787
788VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
789{
790 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
791 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
792 return VINF_SUCCESS;
793}
794
795
796VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
797{
798 pVCpu->cpum.s.Guest.eip = eip;
799 return VINF_SUCCESS;
800}
801
802
803VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
804{
805 pVCpu->cpum.s.Guest.eax = eax;
806 return VINF_SUCCESS;
807}
808
809
810VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
811{
812 pVCpu->cpum.s.Guest.ebx = ebx;
813 return VINF_SUCCESS;
814}
815
816
817VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
818{
819 pVCpu->cpum.s.Guest.ecx = ecx;
820 return VINF_SUCCESS;
821}
822
823
824VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
825{
826 pVCpu->cpum.s.Guest.edx = edx;
827 return VINF_SUCCESS;
828}
829
830
831VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
832{
833 pVCpu->cpum.s.Guest.esp = esp;
834 return VINF_SUCCESS;
835}
836
837
838VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
839{
840 pVCpu->cpum.s.Guest.ebp = ebp;
841 return VINF_SUCCESS;
842}
843
844
845VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
846{
847 pVCpu->cpum.s.Guest.esi = esi;
848 return VINF_SUCCESS;
849}
850
851
852VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
853{
854 pVCpu->cpum.s.Guest.edi = edi;
855 return VINF_SUCCESS;
856}
857
858
859VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
860{
861 pVCpu->cpum.s.Guest.ss.Sel = ss;
862 return VINF_SUCCESS;
863}
864
865
866VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
867{
868 pVCpu->cpum.s.Guest.cs.Sel = cs;
869 return VINF_SUCCESS;
870}
871
872
873VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
874{
875 pVCpu->cpum.s.Guest.ds.Sel = ds;
876 return VINF_SUCCESS;
877}
878
879
880VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
881{
882 pVCpu->cpum.s.Guest.es.Sel = es;
883 return VINF_SUCCESS;
884}
885
886
887VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
888{
889 pVCpu->cpum.s.Guest.fs.Sel = fs;
890 return VINF_SUCCESS;
891}
892
893
894VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
895{
896 pVCpu->cpum.s.Guest.gs.Sel = gs;
897 return VINF_SUCCESS;
898}
899
900
901VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
902{
903 pVCpu->cpum.s.Guest.msrEFER = val;
904 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
905}
906
907
908VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
909{
910 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
911 if (pcbLimit)
912 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
913 return pVCpu->cpum.s.Guest.idtr.pIdt;
914}
915
916
917VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
918{
919 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
920 if (pHidden)
921 *pHidden = pVCpu->cpum.s.Guest.tr;
922 return pVCpu->cpum.s.Guest.tr.Sel;
923}
924
925
926VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
927{
928 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
929 return pVCpu->cpum.s.Guest.cs.Sel;
930}
931
932
933VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
934{
935 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
936 return pVCpu->cpum.s.Guest.ds.Sel;
937}
938
939
940VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
941{
942 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
943 return pVCpu->cpum.s.Guest.es.Sel;
944}
945
946
947VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
948{
949 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
950 return pVCpu->cpum.s.Guest.fs.Sel;
951}
952
953
954VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
955{
956 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
957 return pVCpu->cpum.s.Guest.gs.Sel;
958}
959
960
961VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
962{
963 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
964 return pVCpu->cpum.s.Guest.ss.Sel;
965}
966
967
968VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
969{
970 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
971 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
972 if ( !CPUMIsGuestInLongMode(pVCpu)
973 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
974 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
975 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
976}
977
978
979VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
980{
981 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
982 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
983 if ( !CPUMIsGuestInLongMode(pVCpu)
984 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
985 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
986 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.ss.u64Base;
987}
988
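/*
 * Sketch of the flat-address rule used by the two getters above: outside
 * 64-bit code the result wraps at 32 bits (EIP/ESP plus the segment base),
 * while in 64-bit code it is simply RIP/RSP plus the (normally zero) base.
 * The helper below is a hypothetical illustration of the PC case.
 */
static uint64_t cpumSketchFlatPc(uint64_t uRip, uint64_t uCsBase, bool f64BitCode)
{
    if (!f64BitCode)
        return (uint32_t)uRip + (uint32_t)uCsBase; /* 16/32-bit: 32-bit wrap-around. */
    return uRip + uCsBase;                         /* 64-bit: CS base is normally 0. */
}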
989
990VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
991{
992 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
993 return pVCpu->cpum.s.Guest.ldtr.Sel;
994}
995
996
997VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
998{
999 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
1000 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
1001 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
1002 return pVCpu->cpum.s.Guest.ldtr.Sel;
1003}
1004
1005
1006VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
1007{
1008 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1009 return pVCpu->cpum.s.Guest.cr0;
1010}
1011
1012
1013VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
1014{
1015 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
1016 return pVCpu->cpum.s.Guest.cr2;
1017}
1018
1019
1020VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
1021{
1022 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
1023 return pVCpu->cpum.s.Guest.cr3;
1024}
1025
1026
1027VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
1028{
1029 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1030 return pVCpu->cpum.s.Guest.cr4;
1031}
1032
1033
1034VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPU pVCpu)
1035{
1036 uint64_t u64;
1037 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1038 if (RT_FAILURE(rc))
1039 u64 = 0;
1040 return u64;
1041}
1042
1043
1044VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
1045{
1046 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
1047 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1048}
1049
1050
1051VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
1052{
1053 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1054 return pVCpu->cpum.s.Guest.eip;
1055}
1056
1057
1058VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
1059{
1060 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1061 return pVCpu->cpum.s.Guest.rip;
1062}
1063
1064
1065VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
1066{
1067 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
1068 return pVCpu->cpum.s.Guest.eax;
1069}
1070
1071
1072VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
1073{
1074 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
1075 return pVCpu->cpum.s.Guest.ebx;
1076}
1077
1078
1079VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
1080{
1081 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
1082 return pVCpu->cpum.s.Guest.ecx;
1083}
1084
1085
1086VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
1087{
1088 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
1089 return pVCpu->cpum.s.Guest.edx;
1090}
1091
1092
1093VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
1094{
1095 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
1096 return pVCpu->cpum.s.Guest.esi;
1097}
1098
1099
1100VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
1101{
1102 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
1103 return pVCpu->cpum.s.Guest.edi;
1104}
1105
1106
1107VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
1108{
1109 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
1110 return pVCpu->cpum.s.Guest.esp;
1111}
1112
1113
1114VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
1115{
1116 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
1117 return pVCpu->cpum.s.Guest.ebp;
1118}
1119
1120
1121VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
1122{
1123 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1124 return pVCpu->cpum.s.Guest.eflags.u32;
1125}
1126
1127
1128VMMDECL(int) CPUMGetGuestCRx(PCVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1129{
1130 switch (iReg)
1131 {
1132 case DISCREG_CR0:
1133 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1134 *pValue = pVCpu->cpum.s.Guest.cr0;
1135 break;
1136
1137 case DISCREG_CR2:
1138 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
1139 *pValue = pVCpu->cpum.s.Guest.cr2;
1140 break;
1141
1142 case DISCREG_CR3:
1143 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
1144 *pValue = pVCpu->cpum.s.Guest.cr3;
1145 break;
1146
1147 case DISCREG_CR4:
1148 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1149 *pValue = pVCpu->cpum.s.Guest.cr4;
1150 break;
1151
1152 case DISCREG_CR8:
1153 {
1154 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1155 uint8_t u8Tpr;
1156 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1157 if (RT_FAILURE(rc))
1158 {
1159 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1160 *pValue = 0;
1161 return rc;
1162 }
1163 *pValue = u8Tpr >> 4; /* bits 7-4 contain the task priority that goes into cr8; bits 3-0 are dropped. */
1164 break;
1165 }
1166
1167 default:
1168 return VERR_INVALID_PARAMETER;
1169 }
1170 return VINF_SUCCESS;
1171}
1172
1173
1174VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
1175{
1176 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1177 return pVCpu->cpum.s.Guest.dr[0];
1178}
1179
1180
1181VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
1182{
1183 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1184 return pVCpu->cpum.s.Guest.dr[1];
1185}
1186
1187
1188VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
1189{
1190 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1191 return pVCpu->cpum.s.Guest.dr[2];
1192}
1193
1194
1195VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
1196{
1197 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1198 return pVCpu->cpum.s.Guest.dr[3];
1199}
1200
1201
1202VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
1203{
1204 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
1205 return pVCpu->cpum.s.Guest.dr[6];
1206}
1207
1208
1209VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
1210{
1211 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
1212 return pVCpu->cpum.s.Guest.dr[7];
1213}
1214
1215
1216VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1217{
1218 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
1219 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1220 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1221 if (iReg == 4 || iReg == 5)
1222 iReg += 2;
1223 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1224 return VINF_SUCCESS;
1225}
1226
1227
1228VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
1229{
1230 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1231 return pVCpu->cpum.s.Guest.msrEFER;
1232}
1233
1234
1235/**
1236 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1237 *
1238 * @returns Pointer to the leaf if found, NULL if not.
1239 *
1240 * @param pVM The cross context VM structure.
1241 * @param uLeaf The leaf to get.
1242 */
1243PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1244{
1245 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1246 if (iEnd)
1247 {
1248 unsigned iStart = 0;
1249 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1250 for (;;)
1251 {
1252 unsigned i = iStart + (iEnd - iStart) / 2U;
1253 if (uLeaf < paLeaves[i].uLeaf)
1254 {
1255 if (i <= iStart)
1256 return NULL;
1257 iEnd = i;
1258 }
1259 else if (uLeaf > paLeaves[i].uLeaf)
1260 {
1261 i += 1;
1262 if (i >= iEnd)
1263 return NULL;
1264 iStart = i;
1265 }
1266 else
1267 {
1268 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1269 return &paLeaves[i];
1270
1271 /* This shouldn't normally happen. But in case it does due
1272 to user configuration overrides or something, just return the
1273 first sub-leaf. */
1274 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1275 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1276 while ( paLeaves[i].uSubLeaf != 0
1277 && i > 0
1278 && uLeaf == paLeaves[i - 1].uLeaf)
1279 i--;
1280 return &paLeaves[i];
1281 }
1282 }
1283 }
1284
1285 return NULL;
1286}
1287
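/*
 * Usage sketch for cpumCpuIdGetLeaf: the leaf array is kept sorted by uLeaf,
 * which is what makes the binary search above valid.  A caller probing for
 * the standard feature leaf might look like this (hypothetical helper):
 */
static bool cpumSketchHasStdFeatureLeaf(PVM pVM)
{
    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
    return pLeaf != NULL;
}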
1288
1289/**
1290 * Looks up a CPUID leaf in the CPUID leaf array.
1291 *
1292 * @returns Pointer to the leaf if found, NULL if not.
1293 *
1294 * @param pVM The cross context VM structure.
1295 * @param uLeaf The leaf to get.
1296 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1297 * isn't.
1298 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1299 */
1300PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1301{
1302 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1303 if (iEnd)
1304 {
1305 unsigned iStart = 0;
1306 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1307 for (;;)
1308 {
1309 unsigned i = iStart + (iEnd - iStart) / 2U;
1310 if (uLeaf < paLeaves[i].uLeaf)
1311 {
1312 if (i <= iStart)
1313 return NULL;
1314 iEnd = i;
1315 }
1316 else if (uLeaf > paLeaves[i].uLeaf)
1317 {
1318 i += 1;
1319 if (i >= iEnd)
1320 return NULL;
1321 iStart = i;
1322 }
1323 else
1324 {
1325 uSubLeaf &= paLeaves[i].fSubLeafMask;
1326 if (uSubLeaf == paLeaves[i].uSubLeaf)
1327 *pfExactSubLeafHit = true;
1328 else
1329 {
1330 /* Find the right subleaf. We return the last one before
1331 uSubLeaf if we don't find an exact match. */
1332 if (uSubLeaf < paLeaves[i].uSubLeaf)
1333 while ( i > 0
1334 && uLeaf == paLeaves[i - 1].uLeaf
1335 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1336 i--;
1337 else
1338 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1339 && uLeaf == paLeaves[i + 1].uLeaf
1340 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1341 i++;
1342 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1343 }
1344 return &paLeaves[i];
1345 }
1346 }
1347 }
1348
1349 *pfExactSubLeafHit = false;
1350 return NULL;
1351}
1352
1353
1354/**
1355 * Gets a CPUID leaf.
1356 *
1357 * @param pVCpu The cross context virtual CPU structure.
1358 * @param uLeaf The CPUID leaf to get.
1359 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1360 * @param pEax Where to store the EAX value.
1361 * @param pEbx Where to store the EBX value.
1362 * @param pEcx Where to store the ECX value.
1363 * @param pEdx Where to store the EDX value.
1364 */
1365VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1366 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1367{
1368 bool fExactSubLeafHit;
1369 PVM pVM = pVCpu->CTX_SUFF(pVM);
1370 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1371 if (pLeaf)
1372 {
1373 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1374 if (fExactSubLeafHit)
1375 {
1376 *pEax = pLeaf->uEax;
1377 *pEbx = pLeaf->uEbx;
1378 *pEcx = pLeaf->uEcx;
1379 *pEdx = pLeaf->uEdx;
1380
1381 /*
1382 * Deal with CPU specific information.
1383 */
1384 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
1385 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
1386 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
1387 {
1388 if (uLeaf == 1)
1389 {
1390 /* EBX: Bits 31-24: Initial APIC ID. */
1391 Assert(pVCpu->idCpu <= 255);
1392 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1393 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1394
1395 /* EDX: Bit 9: AND with APICBASE.EN. */
1396 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1397 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1398
1399 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1400 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1401 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1402 }
1403 else if (uLeaf == 0xb)
1404 {
1405 /* EDX: Initial extended APIC ID. */
1406 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1407 *pEdx = pVCpu->idCpu;
1408 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
1409 }
1410 else if (uLeaf == UINT32_C(0x8000001e))
1411 {
1412 /* EAX: Initial extended APIC ID. */
1413 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1414 *pEax = pVCpu->idCpu;
1415 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
1416 }
1417 else if (uLeaf == UINT32_C(0x80000001))
1418 {
1419 /* EDX: Bit 9: AND with APICBASE.EN. */
1420 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
1421 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1422 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
1423 }
1424 else
1425 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1426 }
1427 }
1428 /*
1429 * Out-of-range sub-leaves aren't as easy and pretty to emulate as the
1430 * in-range ones, but we do the best we can here...
1431 */
1432 else
1433 {
1434 *pEax = *pEbx = *pEcx = *pEdx = 0;
1435 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1436 {
1437 *pEcx = uSubLeaf & 0xff;
1438 *pEdx = pVCpu->idCpu;
1439 }
1440 }
1441 }
1442 else
1443 {
1444 /*
1445 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1446 */
1447 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1448 {
1449 default:
1450 AssertFailed();
1451 RT_FALL_THRU();
1452 case CPUMUNKNOWNCPUID_DEFAULTS:
1453 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1454 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1455 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1456 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1457 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1458 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1459 break;
1460 case CPUMUNKNOWNCPUID_PASSTHRU:
1461 *pEax = uLeaf;
1462 *pEbx = 0;
1463 *pEcx = uSubLeaf;
1464 *pEdx = 0;
1465 break;
1466 }
1467 }
1468 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1469}
1470
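/*
 * Usage sketch for CPUMGetGuestCpuId: querying the standard feature leaf and
 * testing a feature bit (SSE2, EDX bit 26).  The helper is hypothetical and
 * only illustrates the calling convention (pass sub-leaf 0 when it does not
 * apply).
 */
static bool cpumSketchGuestHasSse2(PVMCPU pVCpu)
{
    uint32_t uEax, uEbx, uEcx, uEdx;
    CPUMGetGuestCpuId(pVCpu, 1 /*uLeaf*/, 0 /*uSubLeaf*/, &uEax, &uEbx, &uEcx, &uEdx);
    return RT_BOOL(uEdx & X86_CPUID_FEATURE_EDX_SSE2);
}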
1471
1472/**
1473 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1474 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1475 *
1476 * @returns Previous value.
1477 * @param pVCpu The cross context virtual CPU structure to make the
1478 * change on. Usually the calling EMT.
1479 * @param fVisible Whether to make it visible (true) or hide it (false).
1480 *
1481 * @remarks This is "VMMDECL" so that it still links with
1482 * the old APIC code which is in VBoxDD2 and not in
1483 * the VMM module.
1484 */
1485VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1486{
1487 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1488 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1489
1490#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1491 /*
1492 * Patch manager saved state legacy pain.
1493 */
1494 PVM pVM = pVCpu->CTX_SUFF(pVM);
1495 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1496 if (pLeaf)
1497 {
1498 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1499 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx;
1500 else
1501 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx & ~X86_CPUID_FEATURE_EDX_APIC;
1502 }
1503
1504 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1505 if (pLeaf)
1506 {
1507 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1508 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx;
1509 else
1510 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx & ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1511 }
1512#endif
1513
1514 return fOld;
1515}
1516
1517
1518/**
1519 * Gets the host CPU vendor.
1520 *
1521 * @returns CPU vendor.
1522 * @param pVM The cross context VM structure.
1523 */
1524VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1525{
1526 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1527}
1528
1529
1530/**
1531 * Gets the guest CPU vendor.
1532 *
1533 * @returns CPU vendor.
1534 * @param pVM The cross context VM structure.
1535 */
1536VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1537{
1538 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1539}
1540
1541
1542VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1543{
1544 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1545 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1546}
1547
1548
1549VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1550{
1551 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1552 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1553}
1554
1555
1556VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1557{
1558 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1559 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1560}
1561
1562
1563VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1564{
1565 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1566 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1567}
1568
1569
1570VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1571{
1572 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1573 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
1574 return VINF_SUCCESS; /* No need to recalc. */
1575}
1576
1577
1578VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1579{
1580 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1581 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
1582 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1583}
1584
1585
1586VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1587{
1588 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1589 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1590 if (iReg == 4 || iReg == 5)
1591 iReg += 2;
1592 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1593 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1594}
1595
1596
1597/**
1598 * Recalculates the hypervisor DRx register values based on current guest
1599 * registers and DBGF breakpoints, updating changed registers depending on the
1600 * context.
1601 *
1602 * This is called whenever a guest DRx register is modified (any context) and
1603 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1604 *
1605 * In raw-mode context this function will reload any (hyper) DRx registers that
1606 * come out with a different value. It may also have to save the host debug
1607 * registers if that hasn't been done already. In this context though, we'll
1608 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1609 * are only important when breakpoints are actually enabled.
1610 *
1611 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1612 * reloaded by the HM code if it changes. Furthermore, we will only use the
1613 * combined register set when the VBox debugger is actually using hardware BPs;
1614 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1615 * concern us here).
1616 *
1617 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1618 * all the time.
1619 *
1620 * @returns VINF_SUCCESS.
1621 * @param pVCpu The cross context virtual CPU structure.
1622 * @param iGstReg The guest debug register number that was modified.
1623 * UINT8_MAX if not a guest register.
1624 * @param fForceHyper Used in HM to force hyper registers because of single
1625 * stepping.
1626 */
1627VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1628{
1629 PVM pVM = pVCpu->CTX_SUFF(pVM);
1630#ifndef IN_RING0
1631 RT_NOREF_PV(iGstReg);
1632#endif
1633
1634 /*
1635 * Compare the DR7s first.
1636 *
1637 * We only care about the enabled flags. GD is virtualized when we
1638 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1639 * always have the LE and GE bits set, so no need to check and disable
1640 * stuff if they're cleared like we have to for the guest DR7.
1641 */
1642 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1643 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1644 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1645 uGstDr7 = 0;
1646 else if (!(uGstDr7 & X86_DR7_LE))
1647 uGstDr7 &= ~X86_DR7_LE_ALL;
1648 else if (!(uGstDr7 & X86_DR7_GE))
1649 uGstDr7 &= ~X86_DR7_GE_ALL;
1650
1651 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1652
1653#ifdef IN_RING0
1654 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1655 fForceHyper = true;
1656#endif
1657 if ( (!VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7))
1658 & X86_DR7_ENABLED_MASK)
1659 {
1660 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1661#ifdef IN_RC
1662 bool const fRawModeEnabled = true;
1663#elif defined(IN_RING3)
1664 bool const fRawModeEnabled = VM_IS_RAW_MODE_ENABLED(pVM);
1665#endif
1666
1667 /*
1668 * Ok, something is enabled. Recalc each of the breakpoints, taking
1669 * the VM debugger ones over the guest ones. In raw-mode context we will
1670 * not allow breakpoints with values inside the hypervisor area.
1671 */
1672 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1673
1674 /* bp 0 */
1675 RTGCUINTREG uNewDr0;
1676 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1677 {
1678 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1679 uNewDr0 = DBGFBpGetDR0(pVM);
1680 }
1681 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1682 {
1683 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1684#ifndef IN_RING0
1685 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1686 uNewDr0 = 0;
1687 else
1688#endif
1689 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1690 }
1691 else
1692 uNewDr0 = 0;
1693
1694 /* bp 1 */
1695 RTGCUINTREG uNewDr1;
1696 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1697 {
1698 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1699 uNewDr1 = DBGFBpGetDR1(pVM);
1700 }
1701 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1702 {
1703 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1704#ifndef IN_RING0
1705 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1706 uNewDr1 = 0;
1707 else
1708#endif
1709 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1710 }
1711 else
1712 uNewDr1 = 0;
1713
1714 /* bp 2 */
1715 RTGCUINTREG uNewDr2;
1716 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1717 {
1718 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1719 uNewDr2 = DBGFBpGetDR2(pVM);
1720 }
1721 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1722 {
1723 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1724#ifndef IN_RING0
1725 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1726 uNewDr2 = 0;
1727 else
1728#endif
1729 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1730 }
1731 else
1732 uNewDr2 = 0;
1733
1734 /* bp 3 */
1735 RTGCUINTREG uNewDr3;
1736 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1737 {
1738 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1739 uNewDr3 = DBGFBpGetDR3(pVM);
1740 }
1741 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1742 {
1743 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1744#ifndef IN_RING0
1745 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1746 uNewDr3 = 0;
1747 else
1748#endif
1749 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1750 }
1751 else
1752 uNewDr3 = 0;
1753
1754 /*
1755 * Apply the updates.
1756 */
1757#ifdef IN_RC
1758 /* Make sure to save host registers first. */
1759 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1760 {
1761 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1762 {
1763 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1764 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1765 }
1766 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1767 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1768 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1769 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1770 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1771
1772 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1773 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1774 ASMSetDR0(uNewDr0);
1775 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
1776 ASMSetDR1(uNewDr1);
1777 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
1778 ASMSetDR2(uNewDr2);
1779 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
1780 ASMSetDR3(uNewDr3);
1781 ASMSetDR6(X86_DR6_INIT_VAL);
1782 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
1783 ASMSetDR7(uNewDr7);
1784 }
1785 else
1786#endif
1787 {
1788 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1789 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1790 CPUMSetHyperDR3(pVCpu, uNewDr3);
1791 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1792 CPUMSetHyperDR2(pVCpu, uNewDr2);
1793 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1794 CPUMSetHyperDR1(pVCpu, uNewDr1);
1795 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1796 CPUMSetHyperDR0(pVCpu, uNewDr0);
1797 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1798 CPUMSetHyperDR7(pVCpu, uNewDr7);
1799 }
1800 }
1801#ifdef IN_RING0
1802 else if (CPUMIsGuestDebugStateActive(pVCpu))
1803 {
1804 /*
1805 * Reload the register that was modified. Normally this won't happen
1806 * as we won't intercept DRx writes when not having the hyper debug
1807 * state loaded, but in case we do for some reason we'll simply deal
1808 * with it.
1809 */
1810 switch (iGstReg)
1811 {
1812 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1813 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1814 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1815 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1816 default:
1817 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1818 }
1819 }
1820#endif
1821 else
1822 {
1823 /*
1824 * No active debug state any more. In raw-mode this means we have to
1825 * make sure DR7 has everything disabled now, if we armed it already.
1826 * In ring-0 we might end up here when just single stepping.
1827 */
1828#if defined(IN_RC) || defined(IN_RING0)
1829 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1830 {
1831# ifdef IN_RC
1832 ASMSetDR7(X86_DR7_INIT_VAL);
1833# endif
1834 if (pVCpu->cpum.s.Hyper.dr[0])
1835 ASMSetDR0(0);
1836 if (pVCpu->cpum.s.Hyper.dr[1])
1837 ASMSetDR1(0);
1838 if (pVCpu->cpum.s.Hyper.dr[2])
1839 ASMSetDR2(0);
1840 if (pVCpu->cpum.s.Hyper.dr[3])
1841 ASMSetDR3(0);
1842 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1843 }
1844#endif
1845 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1846
1847 /* Clear all the registers. */
1848 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1849 pVCpu->cpum.s.Hyper.dr[3] = 0;
1850 pVCpu->cpum.s.Hyper.dr[2] = 0;
1851 pVCpu->cpum.s.Hyper.dr[1] = 0;
1852 pVCpu->cpum.s.Hyper.dr[0] = 0;
1853
1854 }
1855 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1856 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1857 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1858 pVCpu->cpum.s.Hyper.dr[7]));
1859
1860 return VINF_SUCCESS;
1861}
1862
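/*
 * Sketch of the per-breakpoint merge rule implemented above: a DBGF (VM
 * debugger) breakpoint wins over the guest one for the same slot, and unused
 * slots are zeroed.  Simplified for breakpoint 0 (hypothetical helper):
 */
static RTGCUINTREG cpumSketchPickDr0(RTGCUINTREG uDbgfDr7, RTGCUINTREG uDbgfDr0,
                                     RTGCUINTREG uGstDr7,  RTGCUINTREG uGstDr0)
{
    if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
        return uDbgfDr0;   /* Debugger breakpoint takes precedence. */
    if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
        return uGstDr0;    /* Otherwise mirror the guest breakpoint. */
    return 0;              /* Slot unused. */
}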
1863
1864/**
1865 * Set the guest XCR0 register.
1866 *
1867 * Will load additional state if the FPU state is already loaded (in ring-0 &
1868 * raw-mode context).
1869 *
1870 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1871 * value.
1872 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1873 * @param uNewValue The new value.
1874 * @thread EMT(pVCpu)
1875 */
1876VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
1877{
1878 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
1879 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1880 /* The X87 bit cannot be cleared. */
1881 && (uNewValue & XSAVE_C_X87)
1882 /* AVX requires SSE. */
1883 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1884 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1885 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1886 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1887 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1888 )
1889 {
1890 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1891
1892 /* If more state components are enabled, we need to take care to load
1893 them if the FPU/SSE state is already loaded. Otherwise we may leak
1894 host state to the guest. */
1895 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1896 if (fNewComponents)
1897 {
1898#if defined(IN_RING0) || defined(IN_RC)
1899 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1900 {
1901 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1902 /* Adding more components. */
1903 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
1904 else
1905 {
1906 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1907 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1908 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1909 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1910 }
1911 }
1912#endif
1913 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1914 }
1915 return VINF_SUCCESS;
1916 }
1917 return VERR_CPUM_RAISE_GP_0;
1918}
1919
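/*
 * Worked examples for the XCR0 validity rules enforced above (sketch): X87
 * must always be set, YMM (AVX) requires SSE, and the three AVX-512
 * components must be enabled together with SSE and YMM or not at all.
 */
#if 0 /* illustrative values only */
    CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87);                             /* valid: x87 only */
    CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM); /* valid: x87+SSE+AVX */
    CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_YMM);               /* VERR_CPUM_RAISE_GP_0: AVX without SSE */
    CPUMSetGuestXcr0(pVCpu, XSAVE_C_SSE);                             /* VERR_CPUM_RAISE_GP_0: X87 bit cleared */
#endif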
1920
1921/**
1922 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1923 *
1924 * @returns true if NXE (EFER.NXE) is enabled, otherwise false.
1925 * @param pVCpu The cross context virtual CPU structure.
1926 */
1927VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
1928{
1929 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1930 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1931}
1932
1933
1934/**
1935 * Tests if the guest has the Page Size Extension enabled (PSE).
1936 *
1937 * @returns true if PSE is enabled, otherwise false.
1938 * @param pVCpu The cross context virtual CPU structure.
1939 */
1940VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
1941{
1942 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1943 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1944 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1945}
1946
1947
1948/**
1949 * Tests if the guest has paging enabled (PG).
1950 *
1951 * @returns true if paging is enabled, otherwise false.
1952 * @param pVCpu The cross context virtual CPU structure.
1953 */
1954VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
1955{
1956 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1957 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1958}
1959
1960
1961/**
1962 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
1963 *
1964 * @returns true if write protection is enabled, otherwise false.
1965 * @param pVCpu The cross context virtual CPU structure.
1966 */
1967VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
1968{
1969 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1970 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1971}
1972
1973
1974/**
1975 * Tests if the guest is running in real mode or not.
1976 *
1977 * @returns true if in real mode, otherwise false.
1978 * @param pVCpu The cross context virtual CPU structure.
1979 */
1980VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
1981{
1982 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1983 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1984}
1985
1986
1987/**
1988 * Tests if the guest is running in real or virtual 8086 mode.
1989 *
1990 * @returns @c true if it is, @c false if not.
1991 * @param pVCpu The cross context virtual CPU structure.
1992 */
1993VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
1994{
1995 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
1996 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1997 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1998}
1999
2000
2001/**
2002 * Tests if the guest is running in protected mode or not.
2003 *
2004 * @returns true if in protected mode, otherwise false.
2005 * @param pVCpu The cross context virtual CPU structure.
2006 */
2007VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
2008{
2009 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
2010 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2011}
2012
2013
2014/**
2015 * Tests if the guest is running in paged protected mode or not.
2016 *
2017 * @returns true if in paged protected mode, otherwise false.
2018 * @param pVCpu The cross context virtual CPU structure.
2019 */
2020VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
2021{
2022 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
2023 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2024}
2025
2026
2027/**
2028 * Tests if the guest is running in long mode or not.
2029 *
2030 * @returns true if in long mode, otherwise false.
2031 * @param pVCpu The cross context virtual CPU structure.
2032 */
2033VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
2034{
2035 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
2036 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2037}
2038
2039
2040/**
2041 * Tests if the guest is running in PAE mode or not.
2042 *
2043 * @returns true if in PAE mode, otherwise false.
2044 * @param pVCpu The cross context virtual CPU structure.
2045 */
2046VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
2047{
2048 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
2049 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
2050 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
2051 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2052 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2053 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2054}
2055
2056
2057/**
2058 * Tests if the guest is running in 64-bit mode or not.
2059 *
2060 * @returns true if in 64-bit protected mode, otherwise false.
2061 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2062 */
2063VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2064{
2065 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
2066 if (!CPUMIsGuestInLongMode(pVCpu))
2067 return false;
2068 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2069 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2070}
2071
2072
2073/**
2074 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2075 * registers.
2076 *
2077 * @returns true if in 64-bit protected mode, otherwise false.
2078 * @param pCtx Pointer to the current guest CPU context.
2079 */
2080VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2081{
2082 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2083}
2084
2085#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2086
2087/**
2088 * Checks whether the guest CPU state has been transformed for raw-mode execution.
2089 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2090 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2091 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2092 */
2093VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PCVMCPU pVCpu)
2094{
2095 return pVCpu->cpum.s.fRawEntered;
2096}
2097
2098/**
2099 * Transforms the guest CPU state to raw-ring mode.
2100 *
2101 * This function will change the cs and ss selector registers with RPL=0 to RPL=1.
2102 *
2103 * @returns VBox status code. (recompiler failure)
2104 * @param pVCpu The cross context virtual CPU structure.
2105 * @see @ref pg_raw
2106 */
2107VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2108{
2109 PVM pVM = pVCpu->CTX_SUFF(pVM);
2110
2111 Assert(!pVCpu->cpum.s.fRawEntered);
2112 Assert(!pVCpu->cpum.s.fRemEntered);
2113 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2114
2115 /*
2116 * Are we in Ring-0?
2117 */
2118 if ( pCtx->ss.Sel
2119 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2120 && !pCtx->eflags.Bits.u1VM)
2121 {
2122 /*
2123 * Enter execution mode.
2124 */
2125 PATMRawEnter(pVM, pCtx);
2126
2127 /*
2128 * Set CPL to Ring-1.
2129 */
2130 pCtx->ss.Sel |= 1;
2131 if ( pCtx->cs.Sel
2132 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2133 pCtx->cs.Sel |= 1;
2134 }
2135 else
2136 {
2137# ifdef VBOX_WITH_RAW_RING1
2138 if ( EMIsRawRing1Enabled(pVM)
2139 && !pCtx->eflags.Bits.u1VM
2140 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2141 {
2142 /* Set CPL to Ring-2. */
2143 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2144 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2145 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2146 }
2147# else
2148 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2149 ("ring-1 code not supported\n"));
2150# endif
2151 /*
2152 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2153 */
2154 PATMRawEnter(pVM, pCtx);
2155 }
2156
2157 /*
2158 * Assert sanity.
2159 */
2160 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2161 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2162 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2163 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE));
2164
2165 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2166
2167 pVCpu->cpum.s.fRawEntered = true;
2168 return VINF_SUCCESS;
2169}
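
/*
 * Worked example (illustrative, not from the original sources): for a guest
 * executing ring-0 code with cs.Sel=0x0008 and ss.Sel=0x0010, CPUMRawEnter
 * leaves the hidden base/limit/attributes alone and only bumps the selector
 * RPL so raw-mode runs the code at ring-1:
 *
 *      before: cs.Sel=0x0008 (RPL=0)   ss.Sel=0x0010 (RPL=0)
 *      after:  cs.Sel=0x0009 (RPL=1)   ss.Sel=0x0011 (RPL=1)
 *
 * CPUMRawLeave below undoes this before anything outside raw-mode inspects
 * the context.
 */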
2170
2171
2172/**
2173 * Transforms the guest CPU state from raw-ring mode to correct values.
2174 *
2175 * This function will change any selector registers with RPL=1 to RPL=0.
2176 *
2177 * @returns Adjusted rc.
2178 * @param pVCpu The cross context virtual CPU structure.
2179 * @param rc Raw mode return code
2180 * @see @ref pg_raw
2181 */
2182VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2183{
2184 PVM pVM = pVCpu->CTX_SUFF(pVM);
2185
2186 /*
2187 * Don't leave if we've already left (in RC).
2188 */
2189 Assert(!pVCpu->cpum.s.fRemEntered);
2190 if (!pVCpu->cpum.s.fRawEntered)
2191 return rc;
2192 pVCpu->cpum.s.fRawEntered = false;
2193
2194 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2195 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2196 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2197 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2198
2199 /*
2200 * Are we executing in raw ring-1?
2201 */
2202 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2203 && !pCtx->eflags.Bits.u1VM)
2204 {
2205 /*
2206 * Leave execution mode.
2207 */
2208 PATMRawLeave(pVM, pCtx, rc);
2209 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2210 /** @todo See what happens if we remove this. */
2211 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2212 pCtx->ds.Sel &= ~X86_SEL_RPL;
2213 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2214 pCtx->es.Sel &= ~X86_SEL_RPL;
2215 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2216 pCtx->fs.Sel &= ~X86_SEL_RPL;
2217 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2218 pCtx->gs.Sel &= ~X86_SEL_RPL;
2219
2220 /*
2221 * Ring-1 selector => Ring-0.
2222 */
2223 pCtx->ss.Sel &= ~X86_SEL_RPL;
2224 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2225 pCtx->cs.Sel &= ~X86_SEL_RPL;
2226 }
2227 else
2228 {
2229 /*
2230 * PATM is taking care of the IOPL and IF flags for us.
2231 */
2232 PATMRawLeave(pVM, pCtx, rc);
2233 if (!pCtx->eflags.Bits.u1VM)
2234 {
2235# ifdef VBOX_WITH_RAW_RING1
2236 if ( EMIsRawRing1Enabled(pVM)
2237 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2238 {
2239 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2240 /** @todo See what happens if we remove this. */
2241 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2242 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2243 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2244 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2245 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2246 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2247 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2248 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2249
2250 /*
2251 * Ring-2 selector => Ring-1.
2252 */
2253 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2254 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2255 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2256 }
2257 else
2258 {
2259# endif
2260 /** @todo See what happens if we remove this. */
2261 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2262 pCtx->ds.Sel &= ~X86_SEL_RPL;
2263 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2264 pCtx->es.Sel &= ~X86_SEL_RPL;
2265 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2266 pCtx->fs.Sel &= ~X86_SEL_RPL;
2267 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2268 pCtx->gs.Sel &= ~X86_SEL_RPL;
2269# ifdef VBOX_WITH_RAW_RING1
2270 }
2271# endif
2272 }
2273 }
2274
2275 return rc;
2276}
2277
2278#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2279
2280/**
2281 * Updates the EFLAGS while we're in raw-mode.
2282 *
2283 * @param pVCpu The cross context virtual CPU structure.
2284 * @param fEfl The new EFLAGS value.
2285 */
2286VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2287{
2288#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2289 if (pVCpu->cpum.s.fRawEntered)
2290 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2291 else
2292#endif
2293 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2294}
2295
2296
2297/**
2298 * Gets the EFLAGS while we're in raw-mode.
2299 *
2300 * @returns The eflags.
2301 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2302 */
2303VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2304{
2305#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2306 if (pVCpu->cpum.s.fRawEntered)
2307 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2308#endif
2309 return pVCpu->cpum.s.Guest.eflags.u32;
2310}
2311
2312
2313/**
2314 * Sets the specified changed flags (CPUM_CHANGED_*).
2315 *
2316 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2317 * @param fChangedAdd The changed flags to add.
2318 */
2319VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2320{
2321 pVCpu->cpum.s.fChanged |= fChangedAdd;
2322}
2323
2324
2325/**
2326 * Checks if the CPU supports the XSAVE and XRSTOR instructions.
2327 *
2328 * @returns true if supported.
2329 * @returns false if not supported.
2330 * @param pVM The cross context VM structure.
2331 */
2332VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2333{
2334 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2335}
2336
2337
2338/**
2339 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2340 * @returns true if used.
2341 * @returns false if not used.
2342 * @param pVM The cross context VM structure.
2343 */
2344VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2345{
2346 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2347}
2348
2349
2350/**
2351 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2352 * @returns true if used.
2353 * @returns false if not used.
2354 * @param pVM The cross context VM structure.
2355 */
2356VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2357{
2358 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2359}
2360
2361#ifdef IN_RC
2362
2363/**
2364 * Lazily sync in the FPU/XMM state.
2365 *
2366 * @returns VBox status code.
2367 * @param pVCpu The cross context virtual CPU structure.
2368 */
2369VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2370{
2371 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2372}
2373
2374#endif /* IN_RC */
2375
2376/**
2377 * Checks if we activated the FPU/XMM state of the guest OS.
2378 *
2379 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
2380 * time we'll be executing guest code, so it may return true for 64-on-32 when
2381 * we still haven't actually loaded the FPU status, just scheduled it to be
2382 * loaded the next time we go thru the world switcher (CPUM_SYNC_FPU_STATE).
2383 *
2384 * @returns true / false.
2385 * @param pVCpu The cross context virtual CPU structure.
2386 */
2387VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2388{
2389 return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
2390}
2391
2392
2393/**
2394 * Checks if we've really loaded the FPU/XMM state of the guest OS.
2395 *
2396 * @returns true / false.
2397 * @param pVCpu The cross context virtual CPU structure.
2398 */
2399VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
2400{
2401 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
2402}
2403
2404
2405/**
2406 * Checks if we saved the FPU/XMM state of the host OS.
2407 *
2408 * @returns true / false.
2409 * @param pVCpu The cross context virtual CPU structure.
2410 */
2411VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
2412{
2413 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
2414}
2415
2416
2417/**
2418 * Checks if the guest debug state is active.
2419 *
2420 * @returns boolean
2421 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2422 */
2423VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2424{
2425 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2426}
2427
2428
2429/**
2430 * Checks if the guest debug state is to be made active during the world-switch
2431 * (currently only used for the 32->64 switcher case).
2432 *
2433 * @returns boolean
2434 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2435 */
2436VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2437{
2438 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2439}
2440
2441
2442/**
2443 * Checks if the hyper debug state is active.
2444 *
2445 * @returns boolean
2446 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2447 */
2448VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2449{
2450 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2451}
2452
2453
2454/**
2455 * Checks if the hyper debug state is to be made active during the world-switch
2456 * (currently only used for the 32->64 switcher case).
2457 *
2458 * @returns boolean
2459 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2460 */
2461VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2462{
2463 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2464}
2465
2466
2467/**
2468 * Mark the guest's debug state as inactive.
2469 *
2471 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2472 * @todo This API doesn't make sense any more.
2473 */
2474VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2475{
2476 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2477 NOREF(pVCpu);
2478}
2479
2480
2481/**
2482 * Get the current privilege level of the guest.
2483 *
2484 * @returns CPL
2485 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2486 */
2487VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2488{
2489 /*
2490 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2491 *
2492 * Note! We used to check CS.DPL here, assuming it was always equal to
2493 * CPL even if a conforming segment was loaded. But this turned out to
2494 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2495 * during install after a far call to ring 2 with VT-x. Then on newer
2496 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2497 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2498 *
2499 * So, forget CS.DPL, always use SS.DPL.
2500 *
2501 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2502 * isn't necessarily equal if the segment is conforming.
2503 * See section 4.11.1 in the AMD manual.
2504 *
2505 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2506 * right after a real->prot mode switch and when in V8086 mode? That
2507 * section says the RPL specified in a direct transfer (call, jmp,
2508 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2509 * it would be impossible for an exception handler or the iret
2510 * instruction to figure out whether SS:ESP are part of the frame
2511 * or not. A VBox or QEMU bug must've led to this misconception.
2512 *
2513 * Update2: On an AMD Bulldozer system here, I've no trouble loading a null
2514 * selector into SS with an RPL other than the CPL when CPL != 3 and
2515 * we're in 64-bit mode. The Intel dev box doesn't allow this and
2516 * insists on RPL = CPL. Weird.
2517 */
2518 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
2519 uint32_t uCpl;
2520 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2521 {
2522 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2523 {
2524 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2525 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2526 else
2527 {
2528 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2529#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2530# ifdef VBOX_WITH_RAW_RING1
2531 if (pVCpu->cpum.s.fRawEntered)
2532 {
2533 if ( uCpl == 2
2534 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2535 uCpl = 1;
2536 else if (uCpl == 1)
2537 uCpl = 0;
2538 }
2539 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2540# else
2541 if (uCpl == 1)
2542 uCpl = 0;
2543# endif
2544#endif
2545 }
2546 }
2547 else
2548 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2549 }
2550 else
2551 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2552 return uCpl;
2553}
2554
2555
2556/**
2557 * Gets the current guest CPU mode.
2558 *
2559 * If paging mode is what you need, check out PGMGetGuestMode().
2560 *
2561 * @returns The CPU mode.
2562 * @param pVCpu The cross context virtual CPU structure.
2563 */
2564VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2565{
2566 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
2567 CPUMMODE enmMode;
2568 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2569 enmMode = CPUMMODE_REAL;
2570 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2571 enmMode = CPUMMODE_PROTECTED;
2572 else
2573 enmMode = CPUMMODE_LONG;
2574
2575 return enmMode;
2576}
2577
2578
2579/**
2580 * Figures out whether the CPU is currently executing 16-, 32- or 64-bit code.
2581 *
2582 * @returns 16, 32 or 64.
2583 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2584 */
2585VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2586{
2587 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
2588
2589 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2590 return 16;
2591
2592 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2593 {
2594 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2595 return 16;
2596 }
2597
2598 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2599 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2600 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2601 return 64;
2602
2603 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2604 return 32;
2605
2606 return 16;
2607}
2608
2609
2610VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2611{
2612 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
2613
2614 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2615 return DISCPUMODE_16BIT;
2616
2617 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2618 {
2619 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2620 return DISCPUMODE_16BIT;
2621 }
2622
2623 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2624 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2625 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2626 return DISCPUMODE_64BIT;
2627
2628 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2629 return DISCPUMODE_32BIT;
2630
2631 return DISCPUMODE_16BIT;
2632}
2633
2634
2635/**
2636 * Gets the guest MXCSR_MASK value.
2637 *
2638 * This does not access the x87 state, but returns the value we determined at VM
2639 * initialization.
2640 *
2641 * @returns MXCSR mask.
2642 * @param pVM The cross context VM structure.
2643 */
2644VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
2645{
2646 return pVM->cpum.s.GuestInfo.fMxCsrMask;
2647}
2648
2649
2650/**
2651 * Returns whether the guest has physical interrupts enabled.
2652 *
2653 * @returns @c true if interrupts are enabled, @c false otherwise.
2654 * @param pVCpu The cross context virtual CPU structure.
2655 *
2656 * @remarks Warning! This function does -not- take into account the global-interrupt
2657 * flag (GIF).
2658 */
2659VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
2660{
2661 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
2662 {
2663#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2664 uint32_t const fEFlags = !pVCpu->cpum.s.fRawEntered ? pVCpu->cpum.s.Guest.eflags.u : CPUMRawGetEFlags(pVCpu);
2665#else
2666 uint32_t const fEFlags = pVCpu->cpum.s.Guest.eflags.u;
2667#endif
2668 return RT_BOOL(fEFlags & X86_EFL_IF);
2669 }
2670
2671 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
2672 return CPUMIsGuestVmxPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2673
2674 Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
2675 return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2676}
2677
2678
2679/**
2680 * Returns whether the nested-guest has virtual interrupts enabled.
2681 *
2682 * @returns @c true if interrupts are enabled, @c false otherwise.
2683 * @param pVCpu The cross context virtual CPU structure.
2684 *
2685 * @remarks Warning! This function does -not- take into account the global-interrupt
2686 * flag (GIF).
2687 */
2688VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
2689{
2690 Assert(CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest));
2691
2692 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
2693 return CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2694
2695 Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
2696 return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2697}
2698
2699
2700/**
2701 * Calculates the interruptibility of the guest.
2702 *
2703 * @returns Interruptibility level.
2704 * @param pVCpu The cross context virtual CPU structure.
2705 */
2706VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
2707{
2708#if 1
2709 /* Global-interrupt flag blocks pretty much everything we care about here. */
2710 if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
2711 {
2712 /*
2713 * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
2714 * it directly here. If and how EFLAGS are used depends on the context (nested-guest
2715 * or raw-mode). Hence we use the function below which handles the details.
2716 */
2717 if ( CPUMIsGuestPhysIntrEnabled(pVCpu)
2718 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
2719 {
2720 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
2721 || CPUMIsGuestVirtIntrEnabled(pVCpu))
2722 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
2723
2724 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
2725 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
2726 }
2727
2728 /*
2729 * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
2730 * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
2731 * However, there is some uncertainty regarding the converse, i.e. whether
2732 * NMI-blocking until IRET blocks delivery of physical interrupts.
2733 *
2734 * See Intel spec. 25.4.1 "Event Blocking".
2735 */
2736 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2737 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2738
2739 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2740 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
2741
2742 return CPUMINTERRUPTIBILITY_INT_DISABLED;
2743 }
2744 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2745#else
2746 if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
2747 {
2748 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
2749 {
2750 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
2751 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
2752
2753 /** @todo does blocking NMIs mean interrupts are also inhibited? */
2754 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2755 {
2756 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2757 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
2758 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2759 }
2760 AssertFailed();
2761 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2762 }
2763 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2764 }
2765 else
2766 {
2767 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
2768 {
2769 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2770 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2771 return CPUMINTERRUPTIBILITY_INT_DISABLED;
2772 }
2773 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2774 }
2775#endif
2776}
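
/*
 * Illustrative usage sketch (an assumption about the caller, not lifted from
 * the original sources): an event injection loop could dispatch on the
 * CPUMINTERRUPTIBILITY_XXX values returned above roughly like this:
 *
 *      CPUMINTERRUPTIBILITY const enmState = CPUMGetGuestInterruptibility(pVCpu);
 *      switch (enmState)
 *      {
 *          case CPUMINTERRUPTIBILITY_UNRESTRAINED:      // NMIs and external interrupts may be delivered.
 *          case CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED: // Physical interrupts OK, nested-guest virtual ones not.
 *              break;
 *          case CPUMINTERRUPTIBILITY_INT_DISABLED:      // EFLAGS.IF is clear.
 *          case CPUMINTERRUPTIBILITY_INT_INHIBITED:     // Interrupt shadow (STI / MOV SS).
 *          case CPUMINTERRUPTIBILITY_NMI_INHIBIT:       // NMI delivery blocked until IRET.
 *          case CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT:    // GIF is clear, nothing gets through.
 *          default:
 *              break;                                   // Try again later.
 *      }
 */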
2777
2778
2779/**
2780 * Gets whether the guest (or nested-guest) is currently blocking delivery of NMIs.
2781 *
2782 * @returns @c true if NMIs are blocked, @c false otherwise.
2783 * @param pVCpu The cross context virtual CPU structure.
2784 */
2785VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu)
2786{
2787#ifndef IN_RC
2788 /*
2789 * Return the state of guest-NMI blocking in any of the following cases:
2790 * - We're not executing a nested-guest.
2791 * - We're executing an SVM nested-guest[1].
2792 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2793 *
2794 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2795 * SVM hypervisors must track NMI blocking themselves by intercepting
2796 * the IRET instruction after injection of an NMI.
2797 */
2798 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2799 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2800 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2801 || !CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI))
2802 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2803
2804 /*
2805 * Return the state of virtual-NMI blocking, if we are executing a
2806 * VMX nested-guest with virtual-NMIs enabled.
2807 */
2808 return CPUMIsGuestVmxVirtNmiBlocking(pVCpu, pCtx);
2809#else
2810 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2811#endif
2812}
2813
2814
2815/**
2816 * Sets blocking delivery of NMIs to the guest.
2817 *
2818 * @param pVCpu The cross context virtual CPU structure.
2819 * @param fBlock Whether NMIs are blocked or not.
2820 */
2821VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock)
2822{
2823#ifndef IN_RC
2824 /*
2825 * Set the state of guest-NMI blocking in any of the following cases:
2826 * - We're not executing a nested-guest.
2827 * - We're executing an SVM nested-guest[1].
2828 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2829 *
2830 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2831 * SVM hypervisors must track NMI blocking themselves by intercepting
2832 * the IRET instruction after injection of an NMI.
2833 */
2834 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2835 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2836 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2837 || !CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI))
2838 {
2839 if (fBlock)
2840 {
2841 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2842 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2843 }
2844 else
2845 {
2846 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2847 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2848 }
2849 return;
2850 }
2851
2852 /*
2853 * Set the state of virtual-NMI blocking, if we are executing a
2854 * VMX nested-guest with virtual-NMIs enabled.
2855 */
2856 return CPUMSetGuestVmxVirtNmiBlocking(pVCpu, pCtx, fBlock);
2857#else
2858 if (fBlock)
2859 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2860 else
2861 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2862#endif
2863}
2864
2865
2866/**
2867 * Checks whether the SVM nested-guest has physical interrupts enabled.
2868 *
2869 * @returns true if interrupts are enabled, false otherwise.
2870 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2871 * @param pCtx The guest-CPU context.
2872 *
2873 * @remarks This does -not- take into account the global-interrupt flag.
2874 */
2875VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2876{
2877 /** @todo Optimization: Avoid this function call and use a pointer to the
2878 * relevant eflags instead (setup during VMRUN instruction emulation). */
2879#ifdef IN_RC
2880 RT_NOREF2(pVCpu, pCtx);
2881 AssertReleaseFailedReturn(false);
2882#else
2883 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2884
2885 X86EFLAGS fEFlags;
2886 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2887 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2888 else
2889 fEFlags.u = pCtx->eflags.u;
2890
2891 return fEFlags.Bits.u1IF;
2892#endif
2893}
2894
2895
2896/**
2897 * Checks whether the SVM nested-guest is in a state to receive virtual
2898 * interrupts (set up for injection by the VMRUN instruction).
2899 *
2900 * @returns @c true if it is ready to receive virtual interrupts,
2901 * @c false otherwise.
2902 *
2903 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2904 * @param pCtx The guest-CPU context.
2905 */
2906VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2907{
2908#ifdef IN_RC
2909 RT_NOREF2(pVCpu, pCtx);
2910 AssertReleaseFailedReturn(false);
2911#else
2912 RT_NOREF(pVCpu);
2913 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2914
2915 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2916 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2917 Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing virtual-GIF feature to the guest yet. */
2918 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2919 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2920 return false;
2921
2922 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2923#endif
2924}
2925
2926
2927/**
2928 * Gets the pending SVM nested-guest interrupt vector.
2929 *
2930 * @returns The nested-guest interrupt to inject.
2931 * @param pCtx The guest-CPU context.
2932 */
2933VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
2934{
2935#ifdef IN_RC
2936 RT_NOREF(pCtx);
2937 AssertReleaseFailedReturn(0);
2938#else
2939 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2940 return pVmcbCtrl->IntCtrl.n.u8VIntrVector;
2941#endif
2942}
2943
2944
2945/**
2946 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2947 *
2948 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2949 * @param pCtx The guest-CPU context.
2950 */
2951VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx)
2952{
2953 /*
2954 * Reload the guest's "host state".
2955 */
2956 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2957 pCtx->es = pHostState->es;
2958 pCtx->cs = pHostState->cs;
2959 pCtx->ss = pHostState->ss;
2960 pCtx->ds = pHostState->ds;
2961 pCtx->gdtr = pHostState->gdtr;
2962 pCtx->idtr = pHostState->idtr;
2963 CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2964 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2965 pCtx->cr3 = pHostState->uCr3;
2966 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2967 pCtx->rflags = pHostState->rflags;
2968 pCtx->rflags.Bits.u1VM = 0;
2969 pCtx->rip = pHostState->uRip;
2970 pCtx->rsp = pHostState->uRsp;
2971 pCtx->rax = pHostState->uRax;
2972 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2973 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2974 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2975
2976 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2977 * raise \#GP(0) in the guest. */
2978
2979 /** @todo check the loaded host-state for consistency. Figure out what
2980 * exactly this involves? */
2981}
2982
2983
2984/**
2985 * Saves the host-state to the host-state save area as part of a VMRUN.
2986 *
2987 * @param pCtx The guest-CPU context.
2988 * @param cbInstr The length of the VMRUN instruction in bytes.
2989 */
2990VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2991{
2992 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2993 pHostState->es = pCtx->es;
2994 pHostState->cs = pCtx->cs;
2995 pHostState->ss = pCtx->ss;
2996 pHostState->ds = pCtx->ds;
2997 pHostState->gdtr = pCtx->gdtr;
2998 pHostState->idtr = pCtx->idtr;
2999 pHostState->uEferMsr = pCtx->msrEFER;
3000 pHostState->uCr0 = pCtx->cr0;
3001 pHostState->uCr3 = pCtx->cr3;
3002 pHostState->uCr4 = pCtx->cr4;
3003 pHostState->rflags = pCtx->rflags;
3004 pHostState->uRip = pCtx->rip + cbInstr;
3005 pHostState->uRsp = pCtx->rsp;
3006 pHostState->uRax = pCtx->rax;
3007}
3008
3009
3010/**
3011 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
3012 * nested-guest.
3013 *
3014 * @returns The TSC offset after applying any nested-guest TSC offset.
3015 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3016 * @param uTicks The guest TSC.
3017 *
3018 * @sa CPUMRemoveNestedGuestTscOffset.
3019 */
3020VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
3021{
3022#ifndef IN_RC
3023 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3024 if (CPUMIsGuestInVmxNonRootMode(pCtx))
3025 {
3026 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
3027 Assert(pVmcs);
3028 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
3029 return uTicks + pVmcs->u64TscOffset.u;
3030 return uTicks;
3031 }
3032
3033 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3034 {
3035 uint64_t u64TscOffset;
3036 if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
3037 {
3038 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
3039 Assert(pVmcb);
3040 u64TscOffset = pVmcb->ctrl.u64TSCOffset;
3041 }
3042 return uTicks + u64TscOffset;
3043 }
3044#else
3045 RT_NOREF(pVCpu);
3046#endif
3047 return uTicks;
3048}
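
/*
 * Worked example (illustrative): with a nested-guest TSC offset of -1000 in
 * the VMCS/VMCB and a guest TSC reading of 500000, the nested-guest observes
 * 500000 + (-1000) = 499000.  CPUMRemoveNestedGuestTscOffset below performs
 * the inverse mapping, turning 499000 back into 500000.
 */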
3049
3050
3051/**
3052 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
3053 * guest.
3054 *
3055 * @returns The TSC offset after removing any nested-guest TSC offset.
3056 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3057 * @param uTicks The nested-guest TSC.
3058 *
3059 * @sa CPUMApplyNestedGuestTscOffset.
3060 */
3061VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
3062{
3063#ifndef IN_RC
3064 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3065 if (CPUMIsGuestInVmxNonRootMode(pCtx))
3066 {
3067 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
3068 {
3069 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
3070 Assert(pVmcs);
3071 return uTicks - pVmcs->u64TscOffset.u;
3072 }
3073 return uTicks;
3074 }
3075
3076 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3077 {
3078 uint64_t u64TscOffset;
3079 if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
3080 {
3081 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
3082 Assert(pVmcb);
3083 u64TscOffset = pVmcb->ctrl.u64TSCOffset;
3084 }
3085 return uTicks - u64TscOffset;
3086 }
3087#else
3088 RT_NOREF(pVCpu);
3089#endif
3090 return uTicks;
3091}
3092
3093
3094/**
3095 * Used to dynamically import state residing in NEM or HM.
3096 *
3097 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
3098 *
3099 * @returns VBox status code.
3100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3101 * @param fExtrnImport The fields to import.
3102 * @thread EMT(pVCpu)
3103 */
3104VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPU pVCpu, uint64_t fExtrnImport)
3105{
3106 VMCPU_ASSERT_EMT(pVCpu);
3107 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
3108 {
3109#ifndef IN_RC
3110 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
3111 {
3112 case CPUMCTX_EXTRN_KEEPER_NEM:
3113 {
3114 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
3115 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
3116 return rc;
3117 }
3118
3119 case CPUMCTX_EXTRN_KEEPER_HM:
3120 {
3121#ifdef IN_RING0
3122 int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
3123 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
3124 return rc;
3125#else
3126 AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
3127 return VINF_SUCCESS;
3128#endif
3129 }
3130 default:
3131 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
3132 }
3133#else
3134 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
3135#endif
3136 }
3137 return VINF_SUCCESS;
3138}
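
/*
 * Illustrative usage sketch (not from the original sources): a caller needing
 * CR4 up to date pulls it in on demand before touching the context.  This is
 * essentially what the CPUM_IMPORT_EXTRN_RET() macro mentioned above boils
 * down to when the CPUMCTX_EXTRN_CR4 bit is still set in fExtrn:
 *
 *      int rc = CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_CR4);
 *      AssertRCReturn(rc, rc);
 *      uint64_t const uGuestCr4 = pVCpu->cpum.s.Guest.cr4;
 */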
3139
3140
3141/**
3142 * Gets valid CR4 bits for the guest.
3143 *
3144 * @returns Valid CR4 bits.
3145 * @param pVM The cross context VM structure.
3146 */
3147VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
3148{
3149 PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
3150 uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
3151 | X86_CR4_TSD | X86_CR4_DE
3152 | X86_CR4_PSE | X86_CR4_PAE
3153 | X86_CR4_MCE | X86_CR4_PGE
3154 | X86_CR4_PCE
3155 | X86_CR4_OSXMMEEXCPT; /** @todo r=ramshankar: Introduced in Pentium III along with SSE. Check fSse here? */
3156 if (pGuestFeatures->fFxSaveRstor)
3157 fMask |= X86_CR4_OSFXSR;
3158 if (pGuestFeatures->fVmx)
3159 fMask |= X86_CR4_VMXE;
3160 if (pGuestFeatures->fXSaveRstor)
3161 fMask |= X86_CR4_OSXSAVE;
3162 if (pGuestFeatures->fPcid)
3163 fMask |= X86_CR4_PCIDE;
3164 if (pGuestFeatures->fFsGsBase)
3165 fMask |= X86_CR4_FSGSBASE;
3166 return fMask;
3167}
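
/*
 * Illustrative usage sketch (an assumption about the caller, not from the
 * original sources): a MOV-to-CR4 emulation path would typically reject any
 * bit outside this mask.  uNewCr4 is a hypothetical local holding the value
 * the guest tries to load:
 *
 *      uint64_t const fValidMask = CPUMGetGuestCR4ValidMask(pVCpu->CTX_SUFF(pVM));
 *      if (uNewCr4 & ~fValidMask)
 *          ... raise #GP(0) in the guest ...
 */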
3168
3169
3170/**
3171 * Gets the read and write permission bits for an MSR in an MSR bitmap.
3172 *
3173 * @returns VMXMSRPM_XXX - the MSR permission.
3174 * @param pvMsrBitmap Pointer to the MSR bitmap.
3175 * @param idMsr The MSR to get permissions for.
3176 *
3177 * @sa hmR0VmxSetMsrPermission.
3178 */
3179VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
3180{
3181 AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
3182
3183 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
3184
3185 /*
3186 * MSR Layout:
3187 * Byte index MSR range Interpreted as
3188 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
3189 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
3190 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
3191 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
3192 *
3193 * A bit corresponding to an MSR within the above range causes a VM-exit
3194 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls outside
3195 * either range, it always causes a VM-exit.
3196 *
3197 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
3198 */
3199 uint32_t const offBitmapRead = 0;
3200 uint32_t const offBitmapWrite = 0x800;
3201 uint32_t offMsr;
3202 uint32_t iBit;
3203 if (idMsr <= UINT32_C(0x00001fff))
3204 {
3205 offMsr = 0;
3206 iBit = idMsr;
3207 }
3208 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
3209 {
3210 offMsr = 0x400;
3211 iBit = idMsr - UINT32_C(0xc0000000);
3212 }
3213 else
3214 {
3215 LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
3216 return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
3217 }
3218
3219 /*
3220 * Get the MSR read permissions.
3221 */
3222 uint32_t fRet;
3223 uint32_t const offMsrRead = offBitmapRead + offMsr;
3224 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
3225 if (ASMBitTest(pbMsrBitmap + offMsrRead, iBit))
3226 fRet = VMXMSRPM_EXIT_RD;
3227 else
3228 fRet = VMXMSRPM_ALLOW_RD;
3229
3230 /*
3231 * Get the MSR write permissions.
3232 */
3233 uint32_t const offMsrWrite = offBitmapWrite + offMsr;
3234 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
3235 if (ASMBitTest(pbMsrBitmap + offMsrWrite, iBit))
3236 fRet |= VMXMSRPM_EXIT_WR;
3237 else
3238 fRet |= VMXMSRPM_ALLOW_WR;
3239
3240 Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
3241 return fRet;
3242}
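
/*
 * Worked example (illustrative): for idMsr = 0xc0000080 (MSR_K6_EFER) the high
 * MSR range applies, giving offMsr = 0x400 and iBit = 0x80.  The read
 * permission is therefore bit 0 of byte 0x400 + (0x80 >> 3) = 0x410, and the
 * write permission is bit 0 of byte 0xc00 + (0x80 >> 3) = 0xc10.
 */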
3243
3244
3245/**
3246 * Gets the permission bits for the specified I/O port from the given I/O bitmaps.
3247 *
3248 * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
3249 * @param pvIoBitmapA Pointer to I/O bitmap A.
3250 * @param pvIoBitmapB Pointer to I/O bitmap B.
3251 * @param uPort The I/O port being accessed.
3252 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3253 */
3254VMM_INT_DECL(bool) CPUMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
3255 uint8_t cbAccess)
3256{
3257 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3258
3259 /*
3260 * If the I/O port access wraps around the 16-bit port I/O space,
3261 * we must cause a VM-exit.
3262 *
3263 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3264 */
3265 /** @todo r=ramshankar: Reading 1, 2, 4 bytes at ports 0xffff, 0xfffe and 0xfffc
3266 * respectively is valid and does not constitute a wrap around from what I
3267 * understand. Verify this later. */
3268 uint32_t const uPortLast = uPort + cbAccess;
3269 if (uPortLast > 0x10000)
3270 return true;
3271
3272 /* Read the appropriate bit from the corresponding IO bitmap. */
3273 void const *pvIoBitmap = uPort < 0x8000 ? pvIoBitmapA : pvIoBitmapB;
3274 return ASMBitTest(pvIoBitmap, uPort & 0x7fff); /* The bit index is relative to the selected 4K bitmap. */
3275}
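
/*
 * Worked example (illustrative): an INW from port 0x0060 tests bit 0x60 of
 * bitmap A, while an OUTL to port 0x9000 tests bit 0x9000 - 0x8000 = 0x1000
 * of bitmap B (byte 0x200, bit 0).  A 4-byte access at port 0xfffe spills past
 * port 0xffff and thus always causes a VM-exit regardless of the bitmaps.
 */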
3276
3277
3278/**
3279 * Returns whether the given VMCS field is valid and supported for the guest.
3280 *
3281 * @param pVM The cross context VM structure.
3282 * @param u64VmcsField The VMCS field.
3283 *
3284 * @remarks This takes into account the CPU features exposed to the guest.
3285 */
3286VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVM pVM, uint64_t u64VmcsField)
3287{
3288#ifndef IN_RC
3289 uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
3290 uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
3291 if (!uFieldEncHi)
3292 { /* likely */ }
3293 else
3294 return false;
3295
3296 PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
3297 switch (uFieldEncLo)
3298 {
3299 /*
3300 * 16-bit fields.
3301 */
3302 /* Control fields. */
3303 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
3304 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
3305 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
3306
3307 /* Guest-state fields. */
3308 case VMX_VMCS16_GUEST_ES_SEL:
3309 case VMX_VMCS16_GUEST_CS_SEL:
3310 case VMX_VMCS16_GUEST_SS_SEL:
3311 case VMX_VMCS16_GUEST_DS_SEL:
3312 case VMX_VMCS16_GUEST_FS_SEL:
3313 case VMX_VMCS16_GUEST_GS_SEL:
3314 case VMX_VMCS16_GUEST_LDTR_SEL:
3315 case VMX_VMCS16_GUEST_TR_SEL: return true;
3316 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
3317 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
3318
3319 /* Host-state fields. */
3320 case VMX_VMCS16_HOST_ES_SEL:
3321 case VMX_VMCS16_HOST_CS_SEL:
3322 case VMX_VMCS16_HOST_SS_SEL:
3323 case VMX_VMCS16_HOST_DS_SEL:
3324 case VMX_VMCS16_HOST_FS_SEL:
3325 case VMX_VMCS16_HOST_GS_SEL:
3326 case VMX_VMCS16_HOST_TR_SEL: return true;
3327
3328 /*
3329 * 64-bit fields.
3330 */
3331 /* Control fields. */
3332 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
3333 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
3334 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
3335 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
3336 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
3337 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
3338 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
3339 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
3340 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
3341 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
3342 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
3343 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
3344 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
3345 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
3346 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
3347 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
3348 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
3349 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
3350 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
3351 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
3352 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
3353 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
3354 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
3355 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
3356 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
3357 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
3358 case VMX_VMCS64_CTRL_EPTP_FULL:
3359 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
3360 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
3361 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
3362 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
3363 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
3364 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
3365 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
3366 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
3367 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
3368 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
3369 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
3370 {
3371 PCVMCPU pVCpu = &pVM->aCpus[0];
3372 uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
3373 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
3374 }
3375 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
3376 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
3377 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
3378 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
3379 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
3380 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
3381 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
3382 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
3383 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
3384 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
3385 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
3386 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
3387
3388 /* Read-only data fields. */
3389 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
3390 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
3391
3392 /* Guest-state fields. */
3393 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
3394 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
3395 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
3396 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
3397 case VMX_VMCS64_GUEST_PAT_FULL:
3398 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
3399 case VMX_VMCS64_GUEST_EFER_FULL:
3400 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
3401 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
3402 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
3403 case VMX_VMCS64_GUEST_PDPTE0_FULL:
3404 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
3405 case VMX_VMCS64_GUEST_PDPTE1_FULL:
3406 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
3407 case VMX_VMCS64_GUEST_PDPTE2_FULL:
3408 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
3409 case VMX_VMCS64_GUEST_PDPTE3_FULL:
3410 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
3411 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
3412 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
3413
3414 /* Host-state fields. */
3415 case VMX_VMCS64_HOST_PAT_FULL:
3416 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
3417 case VMX_VMCS64_HOST_EFER_FULL:
3418 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
3419 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
3420 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
3421
3422 /*
3423 * 32-bit fields.
3424 */
3425 /* Control fields. */
3426 case VMX_VMCS32_CTRL_PIN_EXEC:
3427 case VMX_VMCS32_CTRL_PROC_EXEC:
3428 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
3429 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
3430 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
3431 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
3432 case VMX_VMCS32_CTRL_EXIT:
3433 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
3434 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
3435 case VMX_VMCS32_CTRL_ENTRY:
3436 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
3437 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
3438 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
3439 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
3440 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
3441 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
3442 case VMX_VMCS32_CTRL_PLE_GAP:
3443 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
3444
3445 /* Read-only data fields. */
3446 case VMX_VMCS32_RO_VM_INSTR_ERROR:
3447 case VMX_VMCS32_RO_EXIT_REASON:
3448 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
3449 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
3450 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
3451 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
3452 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
3453 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
3454
3455 /* Guest-state fields. */
3456 case VMX_VMCS32_GUEST_ES_LIMIT:
3457 case VMX_VMCS32_GUEST_CS_LIMIT:
3458 case VMX_VMCS32_GUEST_SS_LIMIT:
3459 case VMX_VMCS32_GUEST_DS_LIMIT:
3460 case VMX_VMCS32_GUEST_FS_LIMIT:
3461 case VMX_VMCS32_GUEST_GS_LIMIT:
3462 case VMX_VMCS32_GUEST_LDTR_LIMIT:
3463 case VMX_VMCS32_GUEST_TR_LIMIT:
3464 case VMX_VMCS32_GUEST_GDTR_LIMIT:
3465 case VMX_VMCS32_GUEST_IDTR_LIMIT:
3466 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
3467 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
3468 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
3469 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
3470 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
3471 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
3472 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
3473 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
3474 case VMX_VMCS32_GUEST_INT_STATE:
3475 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
3476 case VMX_VMCS32_GUEST_SMBASE:
3477 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
3478 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
3479
3480 /* Host-state fields. */
3481 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
3482
3483 /*
3484 * Natural-width fields.
3485 */
3486 /* Control fields. */
3487 case VMX_VMCS_CTRL_CR0_MASK:
3488 case VMX_VMCS_CTRL_CR4_MASK:
3489 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
3490 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
3491 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
3492 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
3493 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
3494 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
3495
3496 /* Read-only data fields. */
3497 case VMX_VMCS_RO_EXIT_QUALIFICATION:
3498 case VMX_VMCS_RO_IO_RCX:
3499 case VMX_VMCS_RO_IO_RSI:
3500 case VMX_VMCS_RO_IO_RDI:
3501 case VMX_VMCS_RO_IO_RIP:
3502 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
3503
3504 /* Guest-state fields. */
3505 case VMX_VMCS_GUEST_CR0:
3506 case VMX_VMCS_GUEST_CR3:
3507 case VMX_VMCS_GUEST_CR4:
3508 case VMX_VMCS_GUEST_ES_BASE:
3509 case VMX_VMCS_GUEST_CS_BASE:
3510 case VMX_VMCS_GUEST_SS_BASE:
3511 case VMX_VMCS_GUEST_DS_BASE:
3512 case VMX_VMCS_GUEST_FS_BASE:
3513 case VMX_VMCS_GUEST_GS_BASE:
3514 case VMX_VMCS_GUEST_LDTR_BASE:
3515 case VMX_VMCS_GUEST_TR_BASE:
3516 case VMX_VMCS_GUEST_GDTR_BASE:
3517 case VMX_VMCS_GUEST_IDTR_BASE:
3518 case VMX_VMCS_GUEST_DR7:
3519 case VMX_VMCS_GUEST_RSP:
3520 case VMX_VMCS_GUEST_RIP:
3521 case VMX_VMCS_GUEST_RFLAGS:
3522 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
3523 case VMX_VMCS_GUEST_SYSENTER_ESP:
3524 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
3525
3526 /* Host-state fields. */
3527 case VMX_VMCS_HOST_CR0:
3528 case VMX_VMCS_HOST_CR3:
3529 case VMX_VMCS_HOST_CR4:
3530 case VMX_VMCS_HOST_FS_BASE:
3531 case VMX_VMCS_HOST_GS_BASE:
3532 case VMX_VMCS_HOST_TR_BASE:
3533 case VMX_VMCS_HOST_GDTR_BASE:
3534 case VMX_VMCS_HOST_IDTR_BASE:
3535 case VMX_VMCS_HOST_SYSENTER_ESP:
3536 case VMX_VMCS_HOST_SYSENTER_EIP:
3537 case VMX_VMCS_HOST_RSP:
3538 case VMX_VMCS_HOST_RIP: return true;
3539 }
3540
3541 return false;
3542#else
3543 RT_NOREF2(pVM, u64VmcsField);
3544 return false;
3545#endif
3546}
3547
3548
3549/**
3550 * Checks whether the given I/O access should cause a nested-guest VM-exit.
3551 *
3552 * @returns @c true if the I/O access should cause a VM-exit, @c false otherwise.
3553 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3554 * @param u16Port The I/O port being accessed.
3555 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3556 */
3557VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
3558{
3559#ifndef IN_RC
3560 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3561 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
3562 return true;
3563
3564 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
3565 {
3566 uint8_t const *pbIoBitmapA = (uint8_t const *)pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap);
3567 uint8_t const *pbIoBitmapB = (uint8_t const *)pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap) + VMX_V_IO_BITMAP_A_SIZE;
3568 Assert(pbIoBitmapA);
3569 Assert(pbIoBitmapB);
3570 return CPUMGetVmxIoBitmapPermission(pbIoBitmapA, pbIoBitmapB, u16Port, cbAccess);
3571 }
3572
3573 return false;
3574#else
3575 RT_NOREF3(pVCpu, u16Port, cbAccess);
3576 return false;
3577#endif
3578}
3579
3580
3581/**
3582 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
3583 * VM-exit or not.
3584 *
3585 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
3586 * @param pVCpu The cross context virtual CPU structure.
3587 * @param uExitReason The VM-exit reason (VMX_EXIT_VMREAD or
3588 * VMX_EXIT_VMWRITE).
3589 * @param u64FieldEnc The VMCS field encoding.
3590 */
3591VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc)
3592{
3593#ifndef IN_RC
3594 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
3595 Assert( uExitReason == VMX_EXIT_VMREAD
3596 || uExitReason == VMX_EXIT_VMWRITE);
3597
3598 /*
3599 * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
3600 */
3601 if (!pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fVmxVmcsShadowing)
3602 return true;
3603
3604 /*
3605 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
3606 * is intercepted. This excludes any reserved bits in the valid parts of the field
3607 * encoding (i.e. bit 12).
3608 */
3609 if (u64FieldEnc & VMX_VMCSFIELD_RSVD_MASK)
3610 return true;
3611
3612 /*
3613 * Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not.
3614 */
3615 uint32_t const u32FieldEnc = RT_LO_U32(u64FieldEnc);
3616 Assert(u32FieldEnc >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
3617 Assert(pVCpu->cpum.s.Guest.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
3618 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
3619 ? (uint8_t const *)pVCpu->cpum.s.Guest.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
3620 : (uint8_t const *)pVCpu->cpum.s.Guest.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
3621 Assert(pbBitmap);
3622 pbBitmap += (u32FieldEnc >> 3);
3623 if (*pbBitmap & RT_BIT(u32FieldEnc & 7))
3624 return true;
3625
3626 return false;
3627
3628#else
3629 RT_NOREF3(pVCpu, uExitReason, u64FieldEnc);
3630 return false;
3631#endif
3632}
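
/*
 * Worked example of the bitmap lookup above (illustrative only): for the
 * guest RIP field, encoding 0x681e, the low 32 bits give
 *      byte offset = 0x681e >> 3 = 0xd03
 *      bit         = 0x681e & 7  = 6
 * so bit 6 of the byte at offset 0xd03 in the VMREAD (or VMWRITE) bitmap
 * decides whether the instruction is intercepted for that field.
 */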
3633
3634
3635
3636/**
3637 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
3638 *
 * @returns @c true if the I/O access is intercepted, @c false otherwise.
 *
3639 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
3640 * @param u16Port The IO port being accessed.
3641 * @param enmIoType The type of IO access.
3642 * @param cbReg The IO operand size in bytes.
3643 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
3644 * @param iEffSeg The effective segment number.
3645 * @param fRep Whether this is a repeating IO instruction (REP prefix).
3646 * @param fStrIo Whether this is a string IO instruction.
3647 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO struct to be filled.
3648 * Optional, can be NULL.
3649 */
3650VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
3651 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
3652 PSVMIOIOEXITINFO pIoExitInfo)
3653{
3654 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
3655 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
3656
3657 /*
3658 * The IOPM layout:
3659 * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
3660 * two 4K pages.
3661 *
3662 * For IO instructions that access more than a single byte, the permission bits
3663 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
3664 *
3665 * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
3666 * we need 3 extra bits beyond the second 4K page.
3667 */
3668 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
3669
3670 uint16_t const offIopm = u16Port >> 3;
3671 uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
3672 uint8_t const cShift = u16Port - (offIopm << 3);
3673 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
3674
3675 uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
3676 Assert(pbIopm);
3677 pbIopm += offIopm;
3678 uint16_t const u16Iopm = *(uint16_t *)pbIopm;
3679 if (u16Iopm & fIopmMask)
3680 {
3681 if (pIoExitInfo)
3682 {
3683 static const uint32_t s_auIoOpSize[] =
3684 { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
3685
3686 static const uint32_t s_auIoAddrSize[] =
3687 { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
3688
3689 pIoExitInfo->u = s_auIoOpSize[cbReg & 7];
3690 pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
3691 pIoExitInfo->n.u1Str = fStrIo;
3692 pIoExitInfo->n.u1Rep = fRep;
3693 pIoExitInfo->n.u3Seg = iEffSeg & 7;
3694 pIoExitInfo->n.u1Type = enmIoType;
3695 pIoExitInfo->n.u16Port = u16Port;
3696 }
3697 return true;
3698 }
3699
3700 /** @todo remove later (debugging aid: VirtualBox always intercepts IO, so an
3701 *        intercept is expected whenever we get here). */
3702 AssertMsgFailed(("CPUMIsSvmIoInterceptSet: We expect an IO intercept here!\n"));
3703 return false;
3704}
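
/*
 * Worked example of the IOPM layout described above (illustrative only): each
 * port owns one bit, so port P lives in bit (P & 7) of byte (P >> 3).  A
 * 2-byte access to port 0x0064 therefore needs the bits for ports 0x64 and
 * 0x65, i.e. bits 4 and 5 of the byte at offset 0x0c; if either bit is set in
 * the nested-guest IOPM, the access is intercepted.
 */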
3705
3706
3707/**
3708 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
3709 *
3710 * @returns VBox status code: VINF_SUCCESS if @a idMsr is covered by the MSRPM,
 *          VERR_OUT_OF_RANGE otherwise.
3711 * @param idMsr The MSR being requested.
3712 * @param pbOffMsrpm Where to store the byte offset in the MSR permission
3713 * bitmap for @a idMsr.
3714 * @param puMsrpmBit Where to store the bit offset within the byte at
3715 * offset @a pbOffMsrpm.
3716 */
3717VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
3718{
3719 Assert(pbOffMsrpm);
3720 Assert(puMsrpmBit);
3721
3722 /*
3723 * MSRPM Layout:
3724 * Byte offset MSR range
3725 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
3726 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
3727 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
3728 * 0x1800 - 0x1fff Reserved
3729 *
3730 * Each MSR is represented by 2 permission bits (read and write).
3731 */
3732 if (idMsr <= 0x00001fff)
3733 {
3734 /* Pentium-compatible MSRs. */
3735 uint32_t const bitoffMsr = idMsr << 1;
3736 *pbOffMsrpm = bitoffMsr >> 3;
3737 *puMsrpmBit = bitoffMsr & 7;
3738 return VINF_SUCCESS;
3739 }
3740
3741 if ( idMsr >= 0xc0000000
3742 && idMsr <= 0xc0001fff)
3743 {
3744 /* AMD Sixth Generation x86 Processor MSRs. */
3745 uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
3746 *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
3747 *puMsrpmBit = bitoffMsr & 7;
3748 return VINF_SUCCESS;
3749 }
3750
3751 if ( idMsr >= 0xc0010000
3752 && idMsr <= 0xc0011fff)
3753 {
3754 /* AMD Seventh and Eighth Generation Processor MSRs. */
3755 uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
3756 *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
3757 *puMsrpmBit = bitoffMsr & 7;
3758 return VINF_SUCCESS;
3759 }
3760
3761 *pbOffMsrpm = 0;
3762 *puMsrpmBit = 0;
3763 return VERR_OUT_OF_RANGE;
3764}
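
/*
 * Worked example of the MSRPM layout above: for MSR 0xc0000080 (EFER),
 *      bitoffMsr   = (0xc0000080 - 0xc0000000) << 1 = 0x100
 *      *pbOffMsrpm = 0x800 + (0x100 >> 3)           = 0x820
 *      *puMsrpmBit = 0x100 & 7                      = 0
 * The even bit of the pair is the read-intercept bit and the odd bit the
 * write-intercept bit for the same MSR.
 *
 * Below is a minimal caller sketch (disabled, illustration only) showing how
 * the offset/bit pair might be used to test an intercept; pvMsrBitmap and
 * fWrite are assumed inputs and the helper name is hypothetical.
 */
#if 0
static bool cpumIsSvmMsrInterceptSetExample(void const *pvMsrBitmap, uint32_t idMsr, bool fWrite)
{
    uint16_t offMsrpm  = 0;
    uint8_t  uMsrpmBit = 0;
    if (RT_SUCCESS(CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit)))
    {
        /* Even bit = read intercept, odd bit = write intercept. */
        uint8_t const bMsrpm = ((uint8_t const *)pvMsrBitmap)[offMsrpm];
        return RT_BOOL(bMsrpm & RT_BIT(fWrite ? uMsrpmBit + 1 : uMsrpmBit));
    }
    return true; /* MSRs outside the covered ranges: policy is up to the caller. */
}
#endif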
3765