VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@80983

Last change on this file since 80983 was 80814, checked in by vboxsync, 5 years ago

VMM/CPUM: Nested VMX: bugref:9180 Fix I/O bitmap accesses now that we don't have 2 different I/O bitmaps to check.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 96.6 KB
/* $Id: CPUMAllRegs.cpp 80814 2019-09-16 07:45:42Z vboxsync $ */
/** @file
 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/hm.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/err.h>
#include <VBox/dis.h>
#include <VBox/log.h>
#include <VBox/vmm/tm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif

/** Disable stack frame pointer generation here. */
#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
# pragma optimize("y", off)
#endif

AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures,  cpum.ro.HostFeatures);
AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/**
 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
 *
 * @returns Pointer to the Virtual CPU.
 * @param   a_pGuestCtx     Pointer to the guest context.
 */
#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)

/**
 * Lazily loads the hidden parts of a selector register when using raw-mode.
 */
#define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg))

/** @def CPUM_INT_ASSERT_NOT_EXTRN
 * Macro for asserting that @a a_fNotExtrn are present.
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
 * @param   a_fNotExtrn     Mask of CPUMCTX_EXTRN_XXX bits to check.
 */
#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
    AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
              ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))


VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
{
    pVCpu->cpum.s.Hyper.cr3 = cr3;
}

VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.cr3;
}


/** @def MAYBE_LOAD_DRx
 * Macro for updating DRx values in raw-mode and ring-0 contexts.
 */
#ifdef IN_RING0
# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { a_fnLoad(a_uValue); } while (0)
#else
# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
#endif

VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
{
    pVCpu->cpum.s.Hyper.dr[0] = uDr0;
    MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
}


VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
{
    pVCpu->cpum.s.Hyper.dr[1] = uDr1;
    MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
}


VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
{
    pVCpu->cpum.s.Hyper.dr[2] = uDr2;
    MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
}


VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
{
    pVCpu->cpum.s.Hyper.dr[3] = uDr3;
    MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
}


VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
{
    pVCpu->cpum.s.Hyper.dr[6] = uDr6;
}


VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
{
    pVCpu->cpum.s.Hyper.dr[7] = uDr7;
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[0];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[1];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[2];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[3];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[6];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[7];
}


/**
 * Gets the pointer to the internal CPUMCTXCORE structure.
 * This is only for reading in order to save a few calls.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
{
    return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
}


/**
 * Queries the pointer to the internal CPUMCTX structure.
 *
 * @returns The CPUMCTX pointer.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
{
    return &pVCpu->cpum.s.Guest;
}


/**
 * Queries the pointer to the internal CPUMCTXMSRS structure.
 *
 * This is for NEM only.
 *
 * @returns The CPUMCTX pointer.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
{
    return &pVCpu->cpum.s.GuestMsrs;
}


VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
{
    pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
    pVCpu->cpum.s.Guest.gdtr.pGdt  = GCPtrBase;
    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
    return VINF_SUCCESS; /* formality, consider it void. */
}


VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
{
    pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
    pVCpu->cpum.s.Guest.idtr.pIdt  = GCPtrBase;
    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
    return VINF_SUCCESS; /* formality, consider it void. */
}


VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
{
    pVCpu->cpum.s.Guest.tr.Sel = tr;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
    return VINF_SUCCESS; /* formality, consider it void. */
}


VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
{
    pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
    /* The caller will set more hidden bits if it has them. */
    pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
    pVCpu->cpum.s.Guest.ldtr.fFlags   = 0;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
    return VINF_SUCCESS; /* formality, consider it void. */
}


/**
 * Set the guest CR0.
 *
 * When called in GC, the hyper CR0 may be updated if that is
 * required. The caller only has to take special action if AM,
 * WP, PG or PE changes.
 *
 * @returns VINF_SUCCESS (consider it void).
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   cr0     The new CR0 value.
 */
VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0)
{
    /*
     * Check for changes causing TLB flushes (for REM).
     * The caller is responsible for calling PGM when appropriate.
     */
    if (   (cr0                     & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
        != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
        pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;

    /*
     * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack).
     */
    if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
        PGMCr0WpEnabled(pVCpu);

    /* The ET flag is settable on a 386 and hardwired on 486+. */
    if (   !(cr0 & X86_CR0_ET)
        && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
        cr0 |= X86_CR0_ET;

    pVCpu->cpum.s.Guest.cr0 = cr0;
    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
    return VINF_SUCCESS;
}
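
/** Editor's note: illustrative usage sketch, not part of the original source.
 * It shows the ET hardwiring above from a caller's perspective; assumes a
 * valid pVCpu on the calling EMT with CR0 not marked external.
 * @code
 *     uint64_t const cr0 = CPUMGetGuestCR0(pVCpu);
 *     CPUMSetGuestCR0(pVCpu, cr0 & ~X86_CR0_ET);
 *     Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_ET); // still set unless emulating an 80386
 * @endcode */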


VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
{
    pVCpu->cpum.s.Guest.cr2 = cr2;
    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
{
    pVCpu->cpum.s.Guest.cr3 = cr3;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
{
    /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */

    if (   (cr4                     & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
        != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
        pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;

    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
    pVCpu->cpum.s.Guest.cr4 = cr4;
    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
{
    pVCpu->cpum.s.Guest.eflags.u32 = eflags;
    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
{
    pVCpu->cpum.s.Guest.eip = eip;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
{
    pVCpu->cpum.s.Guest.eax = eax;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
{
    pVCpu->cpum.s.Guest.ebx = ebx;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
{
    pVCpu->cpum.s.Guest.ecx = ecx;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
{
    pVCpu->cpum.s.Guest.edx = edx;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
{
    pVCpu->cpum.s.Guest.esp = esp;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
{
    pVCpu->cpum.s.Guest.ebp = ebp;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
{
    pVCpu->cpum.s.Guest.esi = esi;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
{
    pVCpu->cpum.s.Guest.edi = edi;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
{
    pVCpu->cpum.s.Guest.ss.Sel = ss;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
{
    pVCpu->cpum.s.Guest.cs.Sel = cs;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
{
    pVCpu->cpum.s.Guest.ds.Sel = ds;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
{
    pVCpu->cpum.s.Guest.es.Sel = es;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
{
    pVCpu->cpum.s.Guest.fs.Sel = fs;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
{
    pVCpu->cpum.s.Guest.gs.Sel = gs;
    return VINF_SUCCESS;
}


VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
{
    pVCpu->cpum.s.Guest.msrEFER = val;
    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
}


VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
    if (pcbLimit)
        *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
    return pVCpu->cpum.s.Guest.idtr.pIdt;
}


VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
    if (pHidden)
        *pHidden = pVCpu->cpum.s.Guest.tr;
    return pVCpu->cpum.s.Guest.tr.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
    return pVCpu->cpum.s.Guest.cs.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
    return pVCpu->cpum.s.Guest.ds.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
    return pVCpu->cpum.s.Guest.es.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
    return pVCpu->cpum.s.Guest.fs.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
    return pVCpu->cpum.s.Guest.gs.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
    return pVCpu->cpum.s.Guest.ss.Sel;
}


VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
    if (   !CPUMIsGuestInLongMode(pVCpu)
        || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
        return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
    return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
}
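
/** Editor's note: illustrative sketch, not part of the original source.  The
 * flat PC above is CS-base relative; outside 64-bit code the CS base is
 * truncated to 32 bits before the addition.  A hedged equivalent:
 * @code
 *     uint64_t const uFlatPc = CPUMIsGuestIn64BitCode(pVCpu)
 *                            ? pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base
 *                            : pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
 * @endcode */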


VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
    if (   !CPUMIsGuestInLongMode(pVCpu)
        || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
        return pVCpu->cpum.s.Guest.esp + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
    return pVCpu->cpum.s.Guest.rsp + pVCpu->cpum.s.Guest.ss.u64Base;
}


VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
    return pVCpu->cpum.s.Guest.ldtr.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
    *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
    *pcbLimit   = pVCpu->cpum.s.Guest.ldtr.u32Limit;
    return pVCpu->cpum.s.Guest.ldtr.Sel;
}


VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
    return pVCpu->cpum.s.Guest.cr0;
}


VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
    return pVCpu->cpum.s.Guest.cr2;
}


VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
    return pVCpu->cpum.s.Guest.cr3;
}


VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
    return pVCpu->cpum.s.Guest.cr4;
}


VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu)
{
    uint64_t u64;
    int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
    if (RT_FAILURE(rc))
        u64 = 0;
    return u64;
}


VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
    *pGDTR = pVCpu->cpum.s.Guest.gdtr;
}


VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
    return pVCpu->cpum.s.Guest.eip;
}


VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
    return pVCpu->cpum.s.Guest.rip;
}


VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
    return pVCpu->cpum.s.Guest.eax;
}


VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
    return pVCpu->cpum.s.Guest.ebx;
}


VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
    return pVCpu->cpum.s.Guest.ecx;
}


VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
    return pVCpu->cpum.s.Guest.edx;
}


VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
    return pVCpu->cpum.s.Guest.esi;
}


VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
    return pVCpu->cpum.s.Guest.edi;
}


VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
    return pVCpu->cpum.s.Guest.esp;
}


VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
    return pVCpu->cpum.s.Guest.ebp;
}


VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
    return pVCpu->cpum.s.Guest.eflags.u32;
}


VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue)
{
    switch (iReg)
    {
        case DISCREG_CR0:
            CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
            *pValue = pVCpu->cpum.s.Guest.cr0;
            break;

        case DISCREG_CR2:
            CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
            *pValue = pVCpu->cpum.s.Guest.cr2;
            break;

        case DISCREG_CR3:
            CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
            *pValue = pVCpu->cpum.s.Guest.cr3;
            break;

        case DISCREG_CR4:
            CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
            *pValue = pVCpu->cpum.s.Guest.cr4;
            break;

        case DISCREG_CR8:
        {
            CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
            uint8_t u8Tpr;
            int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
            if (RT_FAILURE(rc))
            {
                AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
                *pValue = 0;
                return rc;
            }
            *pValue = u8Tpr >> 4; /* Bits 7-4 of the TPR go into bits 3-0 of CR8. */
            break;
        }

        default:
            return VERR_INVALID_PARAMETER;
    }
    return VINF_SUCCESS;
}
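
/** Editor's note: illustrative usage sketch, not part of the original source.
 * Reading CR8 through the generic accessor above; rc and u64Cr8 are
 * hypothetical locals.  Only bits 3-0 are ever set (TPR bits 7-4).
 * @code
 *     uint64_t u64Cr8;
 *     int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64Cr8);
 *     if (RT_SUCCESS(rc))
 *         Log(("Guest CR8=%#RX64\n", u64Cr8));
 * @endcode */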


VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
    return pVCpu->cpum.s.Guest.dr[0];
}


VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
    return pVCpu->cpum.s.Guest.dr[1];
}


VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
    return pVCpu->cpum.s.Guest.dr[2];
}


VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
    return pVCpu->cpum.s.Guest.dr[3];
}


VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
    return pVCpu->cpum.s.Guest.dr[6];
}


VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
    return pVCpu->cpum.s.Guest.dr[7];
}


VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
    AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
    /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
    if (iReg == 4 || iReg == 5)
        iReg += 2;
    *pValue = pVCpu->cpum.s.Guest.dr[iReg];
    return VINF_SUCCESS;
}
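
/** Editor's note: illustrative usage sketch, not part of the original source.
 * With CR4.DE clear, DR4/DR5 alias DR6/DR7, which the +2 remapping above
 * models; uDr6 is a hypothetical local.
 * @code
 *     uint64_t uDr6;
 *     int rc = CPUMGetGuestDRx(pVCpu, 4, &uDr6); // DR4 reads as DR6
 *     AssertRC(rc);
 *     Assert(uDr6 == CPUMGetGuestDR6(pVCpu));
 * @endcode */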


VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
    return pVCpu->cpum.s.Guest.msrEFER;
}


/**
 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
 *
 * @returns Pointer to the leaf if found, NULL if not.
 *
 * @param   pVM     The cross context VM structure.
 * @param   uLeaf   The leaf to get.
 */
PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
{
    unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
    if (iEnd)
    {
        unsigned iStart = 0;
        PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
        for (;;)
        {
            unsigned i = iStart + (iEnd - iStart) / 2U;
            if (uLeaf < paLeaves[i].uLeaf)
            {
                if (i <= iStart)
                    return NULL;
                iEnd = i;
            }
            else if (uLeaf > paLeaves[i].uLeaf)
            {
                i += 1;
                if (i >= iEnd)
                    return NULL;
                iStart = i;
            }
            else
            {
                if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
                    return &paLeaves[i];

                /* This shouldn't normally happen.  But in case it does due
                   to user configuration overrides or something, just return
                   the first sub-leaf. */
                AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
                                 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
                while (   paLeaves[i].uSubLeaf != 0
                       && i > 0
                       && uLeaf == paLeaves[i - 1].uLeaf)
                    i--;
                return &paLeaves[i];
            }
        }
    }

    return NULL;
}
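
/** Editor's note: illustrative usage sketch, not part of the original source.
 * The lookup above is a plain binary search over leaves sorted by uLeaf; a
 * hedged example for this internal helper (leaf array already initialized):
 * @code
 *     PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000008));
 *     if (pLeaf)
 *         LogRel(("Guest physical address width: %u bits\n", pLeaf->uEax & 0xff));
 * @endcode */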


/**
 * Looks up a CPUID leaf in the CPUID leaf array.
 *
 * @returns Pointer to the leaf if found, NULL if not.
 *
 * @param   pVM                 The cross context VM structure.
 * @param   uLeaf               The leaf to get.
 * @param   uSubLeaf            The subleaf, if applicable.  Just pass 0 if it
 *                              isn't.
 * @param   pfExactSubLeafHit   Whether we've got an exact subleaf hit or not.
 */
PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
{
    unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
    if (iEnd)
    {
        unsigned iStart = 0;
        PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
        for (;;)
        {
            unsigned i = iStart + (iEnd - iStart) / 2U;
            if (uLeaf < paLeaves[i].uLeaf)
            {
                if (i <= iStart)
                    return NULL;
                iEnd = i;
            }
            else if (uLeaf > paLeaves[i].uLeaf)
            {
                i += 1;
                if (i >= iEnd)
                    return NULL;
                iStart = i;
            }
            else
            {
                uSubLeaf &= paLeaves[i].fSubLeafMask;
                if (uSubLeaf == paLeaves[i].uSubLeaf)
                    *pfExactSubLeafHit = true;
                else
                {
                    /* Find the right subleaf.  We return the last one before
                       uSubLeaf if we don't find an exact match. */
                    if (uSubLeaf < paLeaves[i].uSubLeaf)
                        while (   i > 0
                               && uLeaf == paLeaves[i - 1].uLeaf
                               && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
                            i--;
                    else
                        while (   i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
                               && uLeaf == paLeaves[i + 1].uLeaf
                               && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
                            i++;
                    *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
                }
                return &paLeaves[i];
            }
        }
    }

    *pfExactSubLeafHit = false;
    return NULL;
}


/**
 * Gets a CPUID leaf.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uLeaf       The CPUID leaf to get.
 * @param   uSubLeaf    The CPUID sub-leaf to get, if applicable.
 * @param   pEax        Where to store the EAX value.
 * @param   pEbx        Where to store the EBX value.
 * @param   pEcx        Where to store the ECX value.
 * @param   pEdx        Where to store the EDX value.
 */
VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
                                uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
{
    bool fExactSubLeafHit;
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
    if (pLeaf)
    {
        AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
        if (fExactSubLeafHit)
        {
            *pEax = pLeaf->uEax;
            *pEbx = pLeaf->uEbx;
            *pEcx = pLeaf->uEcx;
            *pEdx = pLeaf->uEdx;

            /*
             * Deal with CPU specific information.
             */
            if (pLeaf->fFlags & (  CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
                                 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
                                 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
            {
                if (uLeaf == 1)
                {
                    /* EBX: Bits 31-24: Initial APIC ID. */
                    Assert(pVCpu->idCpu <= 255);
                    AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
                    *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);

                    /* EDX: Bit 9: AND with APICBASE.EN. */
                    if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
                        *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;

                    /* ECX: Bit 27: CR4.OSXSAVE mirror. */
                    *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
                          | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
                }
                else if (uLeaf == 0xb)
                {
                    /* EDX: Initial extended APIC ID. */
                    AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
                    *pEdx = pVCpu->idCpu;
                    Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
                }
                else if (uLeaf == UINT32_C(0x8000001e))
                {
                    /* EAX: Initial extended APIC ID. */
                    AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
                    *pEax = pVCpu->idCpu;
                    Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
                }
                else if (uLeaf == UINT32_C(0x80000001))
                {
                    /* EDX: Bit 9: AND with APICBASE.EN. */
                    if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
                        *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
                    Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
                }
                else
                    AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
            }
        }
        /*
         * Out-of-range sub-leaves aren't easy or pretty to emulate exactly,
         * but we do the best we can here...
         */
        else
        {
            *pEax = *pEbx = *pEcx = *pEdx = 0;
            if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
            {
                *pEcx = uSubLeaf & 0xff;
                *pEdx = pVCpu->idCpu;
            }
        }
    }
    else
    {
        /*
         * Different CPUs have different ways of dealing with unknown CPUID leaves.
         */
        switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
        {
            default:
                AssertFailed();
                RT_FALL_THRU();
            case CPUMUNKNOWNCPUID_DEFAULTS:
            case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
            case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
                *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
                *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
                *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
                *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
                break;
            case CPUMUNKNOWNCPUID_PASSTHRU:
                *pEax = uLeaf;
                *pEbx = 0;
                *pEcx = uSubLeaf;
                *pEdx = 0;
                break;
        }
    }
    Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
}
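
/** Editor's note: illustrative usage sketch, not part of the original source.
 * Querying standard leaf 1 and picking apart the fields the function above
 * post-processes (APIC ID insertion, APIC/OSXSAVE gating):
 * @code
 *     uint32_t uEax, uEbx, uEcx, uEdx;
 *     CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
 *     bool const    fApic  = RT_BOOL(uEdx & X86_CPUID_FEATURE_EDX_APIC); // gated by APICBASE.EN
 *     uint8_t const idApic = uEbx >> 24;                                 // equals pVCpu->idCpu
 * @endcode */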


/**
 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
 *
 * @returns Previous value.
 * @param   pVCpu       The cross context virtual CPU structure to make the
 *                      change on.  Usually the calling EMT.
 * @param   fVisible    Whether to make it visible (true) or hide it (false).
 *
 * @remarks This is "VMMDECL" so that it still links with
 *          the old APIC code which is in VBoxDD2 and not in
 *          the VMM module.
 */
VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
{
    bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
    pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
    return fOld;
}


/**
 * Gets the host CPU vendor.
 *
 * @returns CPU vendor.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
{
    return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
}


/**
 * Gets the host CPU microarchitecture.
 *
 * @returns CPU microarchitecture.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
{
    return pVM->cpum.s.HostFeatures.enmMicroarch;
}


/**
 * Gets the guest CPU vendor.
 *
 * @returns CPU vendor.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
{
    return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
}


/**
 * Gets the guest CPU microarchitecture.
 *
 * @returns CPU microarchitecture.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM)
{
    return pVM->cpum.s.GuestFeatures.enmMicroarch;
}


VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0)
{
    pVCpu->cpum.s.Guest.dr[0] = uDr0;
    return CPUMRecalcHyperDRx(pVCpu, 0, false);
}


VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1)
{
    pVCpu->cpum.s.Guest.dr[1] = uDr1;
    return CPUMRecalcHyperDRx(pVCpu, 1, false);
}


VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2)
{
    pVCpu->cpum.s.Guest.dr[2] = uDr2;
    return CPUMRecalcHyperDRx(pVCpu, 2, false);
}


VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3)
{
    pVCpu->cpum.s.Guest.dr[3] = uDr3;
    return CPUMRecalcHyperDRx(pVCpu, 3, false);
}


VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
{
    pVCpu->cpum.s.Guest.dr[6] = uDr6;
    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
    return VINF_SUCCESS; /* No need to recalc. */
}


VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7)
{
    pVCpu->cpum.s.Guest.dr[7] = uDr7;
    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
    return CPUMRecalcHyperDRx(pVCpu, 7, false);
}


VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value)
{
    AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
    /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
    if (iReg == 4 || iReg == 5)
        iReg += 2;
    pVCpu->cpum.s.Guest.dr[iReg] = Value;
    return CPUMRecalcHyperDRx(pVCpu, iReg, false);
}


/**
 * Recalculates the hypervisor DRx register values based on current guest
 * registers and DBGF breakpoints, updating changed registers depending on the
 * context.
 *
 * This is called whenever a guest DRx register is modified (any context) and
 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
 *
 * In raw-mode context this function will reload any (hyper) DRx registers that
 * come out with different values.  It may also have to save the host debug
 * registers if that hasn't been done already.  In this context though, we'll
 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
 * are only important when breakpoints are actually enabled.
 *
 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
 * reloaded by the HM code if it changes.  Furthermore, we will only use the
 * combined register set when the VBox debugger is actually using hardware BPs;
 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
 * concern us here).
 *
 * In ring-3 we won't be loading anything, so we'll just calculate hypervisor
 * values all the time.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   iGstReg     The guest debug register number that was modified.
 *                      UINT8_MAX if not guest register.
 * @param   fForceHyper Used in HM to force hyper registers because of single
 *                      stepping.
 */
VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg, bool fForceHyper)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
#ifndef IN_RING0
    RT_NOREF_PV(iGstReg);
#endif

    /*
     * Compare the DR7s first.
     *
     * We only care about the enabled flags.  GD is virtualized when we
     * dispatch the #DB, we never enable it.  The DBGF DR7 value will
     * always have the LE and GE bits set, so no need to check and disable
     * stuff if they're cleared like we have to for the guest DR7.
     */
    RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
    /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
    if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
        uGstDr7 = 0;
    else if (!(uGstDr7 & X86_DR7_LE))
        uGstDr7 &= ~X86_DR7_LE_ALL;
    else if (!(uGstDr7 & X86_DR7_GE))
        uGstDr7 &= ~X86_DR7_GE_ALL;

    const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);

    /** @todo r=bird: I'm totally confused by fForceHyper! */
#ifdef IN_RING0
    if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
        fForceHyper = true;
#endif
    if ((!fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
    {
        Assert(!CPUMIsGuestDebugStateActive(pVCpu));

        /*
         * Ok, something is enabled.  Recalc each of the breakpoints, taking
         * the VM debugger ones over the guest ones.  In raw-mode context we
         * will not allow breakpoints with values inside the hypervisor area.
         */
        RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;

        /* bp 0 */
        RTGCUINTREG uNewDr0;
        if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
            uNewDr0 = DBGFBpGetDR0(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
        {
            uNewDr0 = CPUMGetGuestDR0(pVCpu);
            uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
        }
        else
            uNewDr0 = 0;

        /* bp 1 */
        RTGCUINTREG uNewDr1;
        if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
            uNewDr1 = DBGFBpGetDR1(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
        {
            uNewDr1 = CPUMGetGuestDR1(pVCpu);
            uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
        }
        else
            uNewDr1 = 0;

        /* bp 2 */
        RTGCUINTREG uNewDr2;
        if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
            uNewDr2 = DBGFBpGetDR2(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
        {
            uNewDr2 = CPUMGetGuestDR2(pVCpu);
            uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
        }
        else
            uNewDr2 = 0;

        /* bp 3 */
        RTGCUINTREG uNewDr3;
        if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
            uNewDr3 = DBGFBpGetDR3(pVM);
        }
        else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
        {
            uNewDr3 = CPUMGetGuestDR3(pVCpu);
            uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
        }
        else
            uNewDr3 = 0;

        /*
         * Apply the updates.
         */
        pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
        if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
            CPUMSetHyperDR3(pVCpu, uNewDr3);
        if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
            CPUMSetHyperDR2(pVCpu, uNewDr2);
        if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
            CPUMSetHyperDR1(pVCpu, uNewDr1);
        if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
            CPUMSetHyperDR0(pVCpu, uNewDr0);
        if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
            CPUMSetHyperDR7(pVCpu, uNewDr7);
    }
#ifdef IN_RING0
    else if (CPUMIsGuestDebugStateActive(pVCpu))
    {
        /*
         * Reload the register that was modified.  Normally this won't happen
         * as we won't intercept DRx writes when not having the hyper debug
         * state loaded, but in case we do for some reason we'll simply deal
         * with it.
         */
        switch (iGstReg)
        {
            case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
            case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
            case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
            case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
            default:
                AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
        }
    }
#endif
    else
    {
        /*
         * No active debug state any more.  In raw-mode this means we have to
         * make sure DR7 has everything disabled now, if we armed it already.
         * In ring-0 we might end up here when just single stepping.
         */
#ifdef IN_RING0
        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
        {
            if (pVCpu->cpum.s.Hyper.dr[0])
                ASMSetDR0(0);
            if (pVCpu->cpum.s.Hyper.dr[1])
                ASMSetDR1(0);
            if (pVCpu->cpum.s.Hyper.dr[2])
                ASMSetDR2(0);
            if (pVCpu->cpum.s.Hyper.dr[3])
                ASMSetDR3(0);
            pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
        }
#endif
        pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;

        /* Clear all the registers. */
        pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
        pVCpu->cpum.s.Hyper.dr[3] = 0;
        pVCpu->cpum.s.Hyper.dr[2] = 0;
        pVCpu->cpum.s.Hyper.dr[1] = 0;
        pVCpu->cpum.s.Hyper.dr[0] = 0;

    }
    Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
          pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
          pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
          pVCpu->cpum.s.Hyper.dr[7]));

    return VINF_SUCCESS;
}
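
/** Editor's note: illustrative sketch, not part of the original source.  Any
 * guest DRx write funnels through the recalculation above; arming a guest
 * breakpoint with hypothetical values keeps the hyper state consistent:
 * @code
 *     CPUMSetGuestDR0(pVCpu, UINT64_C(0x80401000)); // hypothetical linear address
 *     CPUMSetGuestDR7(pVCpu, X86_DR7_RA1_MASK | X86_DR7_L0
 *                          | X86_DR7_RW(0, X86_DR7_RW_EO) | X86_DR7_LEN(0, X86_DR7_LEN_BYTE));
 * @endcode */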


/**
 * Set the guest XCR0 register.
 *
 * Will load additional state if the FPU state is already loaded (in ring-0 &
 * raw-mode context).
 *
 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
 *          value.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uNewValue   The new value.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
    if (   (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
           /* The X87 bit cannot be cleared. */
        && (uNewValue & XSAVE_C_X87)
           /* AVX requires SSE. */
        && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
           /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
        && (   (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
            ||    (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
               == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
       )
    {
        pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;

        /* If more state components are enabled, we need to take care to load
           them if the FPU/SSE state is already loaded.  May otherwise leak
           host state to the guest. */
        uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
        if (fNewComponents)
        {
#ifdef IN_RING0
            if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
            {
                if (pVCpu->cpum.s.Guest.fXStateMask != 0)
                    /* Adding more components. */
                    ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
                else
                {
                    /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
                    pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
                    if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
                        ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
                }
            }
#endif
            pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
        }
        return VINF_SUCCESS;
    }
    return VERR_CPUM_RAISE_GP_0;
}
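
/** Editor's note: illustrative sketch, not part of the original source,
 * restating the dependency checks above with concrete values (assuming the
 * corresponding features are exposed to the guest; rc is a hypothetical local):
 * @code
 *     rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM); // VINF_SUCCESS
 *     rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_YMM);               // VERR_CPUM_RAISE_GP_0 (YMM needs SSE)
 *     rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_SSE);                             // VERR_CPUM_RAISE_GP_0 (X87 can't be cleared)
 * @endcode */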


/**
 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
 *
 * @returns true if NXE is enabled, otherwise false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
    return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
}


/**
 * Tests if the guest has the Page Size Extension enabled (PSE).
 *
 * @returns true if PSE is enabled, otherwise false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
    /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
    return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
}


/**
 * Tests if the guest has paging enabled (PG).
 *
 * @returns true if paging is enabled, otherwise false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
    return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
}


/**
 * Tests if the guest has ring-0 write protection enabled (WP).
 *
 * @returns true if CR0.WP is set, otherwise false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
    return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
}


/**
 * Tests if the guest is running in real mode or not.
 *
 * @returns true if in real mode, otherwise false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
    return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
}


/**
 * Tests if the guest is running in real or virtual 8086 mode.
 *
 * @returns @c true if it is, @c false if not.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
    return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
        || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
}


/**
 * Tests if the guest is running in protected mode or not.
 *
 * @returns true if in protected mode, otherwise false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
    return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
}


/**
 * Tests if the guest is running in paged protected mode or not.
 *
 * @returns true if in paged protected mode, otherwise false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
    return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
}


/**
 * Tests if the guest is running in long mode or not.
 *
 * @returns true if in long mode, otherwise false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
    return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
}


/**
 * Tests if the guest is running in PAE mode or not.
 *
 * @returns true if in PAE mode, otherwise false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
    /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec.  We shall use EFER.LMA rather
       than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
    return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
        && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
        && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
}


/**
 * Tests if the guest is running in 64-bit mode or not.
 *
 * @returns true if in 64-bit protected mode, otherwise false.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
    if (!CPUMIsGuestInLongMode(pVCpu))
        return false;
    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
    return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
}


/**
 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
 * registers.
 *
 * @returns true if in 64-bit protected mode, otherwise false.
 * @param   pCtx    Pointer to the current guest CPU context.
 */
VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
{
    return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
}


/**
 * Sets the specified changed flags (CPUM_CHANGED_*).
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fChangedAdd The changed flags to add.
 */
VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
{
    pVCpu->cpum.s.fChanged |= fChangedAdd;
}


/**
 * Checks if the CPU supports the XSAVE and XRSTOR instruction.
 *
 * @returns true if supported.
 * @returns false if not supported.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
{
    return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
}


/**
 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
 * @returns true if used.
 * @returns false if not used.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
{
    return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
}


/**
 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
 * @returns true if used.
 * @returns false if not used.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
{
    return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
}


/**
 * Checks if we activated the FPU/XMM state of the guest OS.
 *
 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
 * time we'll be executing guest code, so it may return true for 64-on-32 when
 * we still haven't actually loaded the FPU state, just scheduled it to be
 * loaded the next time we go through the world switcher (CPUM_SYNC_FPU_STATE).
 *
 * @returns true / false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
{
    return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
}


/**
 * Checks if we've really loaded the FPU/XMM state of the guest OS.
 *
 * @returns true / false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
{
    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
}


/**
 * Checks if we saved the FPU/XMM state of the host OS.
 *
 * @returns true / false.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
{
    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
}


/**
 * Checks if the guest debug state is active.
 *
 * @returns boolean
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
{
    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
}


/**
 * Checks if the guest debug state is to be made active during the world-switch
 * (currently only used for the 32->64 switcher case).
 *
 * @returns boolean
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
{
    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
}


/**
 * Checks if the hyper debug state is active.
 *
 * @returns boolean
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
{
    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
}


/**
 * Checks if the hyper debug state is to be made active during the world-switch
 * (currently only used for the 32->64 switcher case).
 *
 * @returns boolean
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
{
    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
}


/**
 * Mark the guest's debug state as inactive.
 *
 * @returns boolean
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @todo    This API doesn't make sense any more.
 */
VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
{
    Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
    NOREF(pVCpu);
}


/**
 * Get the current privilege level of the guest.
 *
 * @returns CPL
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
{
    /*
     * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
     *
     * Note! We used to check CS.DPL here, assuming it was always equal to
     * CPL even if a conforming segment was loaded.  But this turned out to
     * only apply to older AMD-V.  With VT-x we had an ACP2 regression
     * during install after a far call to ring 2 with VT-x.  Then on newer
     * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
     * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
     *
     * So, forget CS.DPL, always use SS.DPL.
     *
     * Note! The SS RPL is always equal to the CPL, while the CS RPL
     * isn't necessarily equal if the segment is conforming.
     * See section 4.11.1 in the AMD manual.
     *
     * Update: Where the heck does it say CS.RPL can differ from CPL other than
     *         right after real->prot mode switch and when in V8086 mode?  That
     *         section says the RPL specified in a direct transfer (call, jmp,
     *         ret) is not the one loaded into CS.  Besides, if CS.RPL != CPL
     *         it would be impossible for an exception handler or the iret
     *         instruction to figure out whether SS:ESP are part of the frame
     *         or not.  A VBox or qemu bug must've led to this misconception.
     *
     * Update2: On an AMD bulldozer system here, I've no trouble loading a null
     *          selector into SS with an RPL other than the CPL when CPL != 3 and
     *          we're in 64-bit mode.  The Intel dev box doesn't allow this; it
     *          insists on RPL = CPL.  Weird.
     */
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
    uint32_t uCpl;
    if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
    {
        if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
        {
            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
                uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
            else
                uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
        }
        else
            uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
    }
    else
        uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
    return uCpl;
}
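
/** Editor's note: illustrative usage sketch, not part of the original source.
 * The SS.DPL rule above in practice: V8086 mode always yields CPL 3, real
 * mode yields 0, otherwise SS.DPL (or SS.RPL when the hidden parts are stale).
 * @code
 *     uint32_t const uCpl = CPUMGetGuestCPL(pVCpu);
 *     bool const fUserMode = (uCpl == 3); // e.g. a hypothetical policy check
 * @endcode */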

/**
 * Gets the current guest CPU mode.
 *
 * If paging mode is what you need, check out PGMGetGuestMode().
 *
 * @returns The CPU mode.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
    CPUMMODE enmMode;
    if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
        enmMode = CPUMMODE_REAL;
    else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
        enmMode = CPUMMODE_PROTECTED;
    else
        enmMode = CPUMMODE_LONG;

    return enmMode;
}


/**
 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
 *
 * @returns 16, 32 or 64.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);

    if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
        return 16;

    if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
    {
        Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
        return 16;
    }

    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
    if (   pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
        && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
        return 64;

    if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
        return 32;

    return 16;
}


VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);

    if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
        return DISCPUMODE_16BIT;

    if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
    {
        Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
        return DISCPUMODE_16BIT;
    }

    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
    if (   pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
        && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
        return DISCPUMODE_64BIT;

    if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
        return DISCPUMODE_32BIT;

    return DISCPUMODE_16BIT;
}


/**
 * Gets the guest MXCSR_MASK value.
 *
 * This does not access the x87 state, but the value we determined at VM
 * initialization.
 *
 * @returns MXCSR mask.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
{
    return pVM->cpum.s.GuestInfo.fMxCsrMask;
}


/**
 * Returns whether the guest has physical interrupts enabled.
 *
 * @returns @c true if interrupts are enabled, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @remarks Warning! This function does -not- take into account the global-interrupt
 *          flag (GIF).
 */
VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
{
    if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
    {
        uint32_t const fEFlags = pVCpu->cpum.s.Guest.eflags.u;
        return RT_BOOL(fEFlags & X86_EFL_IF);
    }

    if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
        return CPUMIsGuestVmxPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);

    Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
    return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
}


/**
 * Returns whether the nested-guest has virtual interrupts enabled.
 *
 * @returns @c true if interrupts are enabled, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @remarks Warning! This function does -not- take into account the global-interrupt
 *          flag (GIF).
 */
VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
{
    Assert(CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest));

    if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
        return CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);

    Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
    return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
}

1913
1914/**
1915 * Calculates the interruptibility of the guest.
1916 *
1917 * @returns Interruptibility level.
1918 * @param pVCpu The cross context virtual CPU structure.
1919 */
1920VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
1921{
1922#if 1
1923 /* Global-interrupt flag blocks pretty much everything we care about here. */
1924 if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
1925 {
1926 /*
1927 * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
1928 * it directly here. If and how EFLAGS are used depends on the context (nested-guest
1929 * or raw-mode). Hence we use the function below which handles the details.
1930 */
1931 if ( CPUMIsGuestPhysIntrEnabled(pVCpu)
1932 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
1933 {
1934 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
1935 || CPUMIsGuestVirtIntrEnabled(pVCpu))
1936 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1937
1938 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
1939 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
1940 }
1941
1942 /*
1943 * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
1944 * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
1945 * However, there is some uncertainty regarding the converse, i.e. whether
1946 * NMI-blocking until IRET blocks delivery of physical interrupts.
1947 *
1948 * See Intel spec. 25.4.1 "Event Blocking".
1949 */
1950 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1951 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1952
1953 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1954 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
1955
1956 return CPUMINTERRUPTIBILITY_INT_DISABLED;
1957 }
1958 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1959#else
1960 if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
1961 {
1962 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
1963 {
1964 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
1965 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1966
1967 /** @todo does blocking NMIs mean interrupts are also inhibited? */
1968 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1969 {
1970 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1971 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
1972 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1973 }
1974 AssertFailed();
1975 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1976 }
1977 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1978 }
1979 else
1980 {
1981 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
1982 {
1983 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1984 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1985 return CPUMINTERRUPTIBILITY_INT_DISABLED;
1986 }
1987 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1988 }
1989#endif
1990}
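
#if 0 /* Illustrative sketch, not part of the original file. */
/*
 * Example of how an event-injection path might act on the interruptibility
 * level computed above; the helper name and the simplification to a single
 * "can inject an external interrupt" check are assumptions.
 */
static bool cpumExampleCanInjectExtInt(PVMCPU pVCpu)
{
    return CPUMGetGuestInterruptibility(pVCpu) == CPUMINTERRUPTIBILITY_UNRESTRAINED;
}
#endif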
1991
1992
1993/**
1994 * Gets whether the guest (or nested-guest) is currently blocking delivery of NMIs.
1995 *
1996 * @returns @c true if NMIs are blocked, @c false otherwise.
1997 * @param pVCpu The cross context virtual CPU structure.
1998 */
1999VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu)
2000{
2001 /*
2002 * Return the state of guest-NMI blocking in any of the following cases:
2003 * - We're not executing a nested-guest.
2004 * - We're executing an SVM nested-guest[1].
2005 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2006 *
2007 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2008 * SVM hypervisors must track NMI blocking themselves by intercepting
2009 * the IRET instruction after injection of an NMI.
2010 */
2011 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2012 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2013 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2014 || !CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI))
2015 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2016
2017 /*
2018 * Return the state of virtual-NMI blocking, if we are executing a
2019 * VMX nested-guest with virtual-NMIs enabled.
2020 */
2021 return CPUMIsGuestVmxVirtNmiBlocking(pVCpu, pCtx);
2022}
2023
2024
2025/**
2026 * Sets blocking delivery of NMIs to the guest.
2027 *
2028 * @param pVCpu The cross context virtual CPU structure.
2029 * @param fBlock Whether NMIs are blocked or not.
2030 */
2031VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock)
2032{
2033 /*
2034 * Set the state of guest-NMI blocking in any of the following cases:
2035 * - We're not executing a nested-guest.
2036 * - We're executing an SVM nested-guest[1].
2037 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2038 *
2039 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2040 * SVM hypervisors must track NMI blocking themselves by intercepting
2041 * the IRET instruction after injection of an NMI.
2042 */
2043 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2044 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2045 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2046 || !CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI))
2047 {
2048 if (fBlock)
2049 {
2050 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2051 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2052 }
2053 else
2054 {
2055 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2056 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2057 }
2058 return;
2059 }
2060
2061 /*
2062 * Set the state of virtual-NMI blocking, if we are executing a
2063 * VMX nested-guest with virtual-NMIs enabled.
2064 */
2065 return CPUMSetGuestVmxVirtNmiBlocking(pVCpu, pCtx, fBlock);
2066}
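
#if 0 /* Illustrative sketch, not part of the original file. */
/*
 * Example of the intended pairing of CPUMIsGuestNmiBlocking and
 * CPUMSetGuestNmiBlocking: blocking is established when an NMI is delivered
 * and lifted when the guest executes IRET.  The helper names are
 * assumptions; the real callers live in the event-injection and IEM code.
 */
static void cpumExampleNmiDelivered(PVMCPU pVCpu)
{
    Assert(!CPUMIsGuestNmiBlocking(pVCpu)); /* NMIs do not nest. */
    CPUMSetGuestNmiBlocking(pVCpu, true /* fBlock */);
}

static void cpumExampleIretExecuted(PVMCPU pVCpu)
{
    CPUMSetGuestNmiBlocking(pVCpu, false /* fBlock */);
}
#endif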
2067
2068
2069/**
2070 * Checks whether the SVM nested-guest has physical interrupts enabled.
2071 *
2072 * @returns true if interrupts are enabled, false otherwise.
2073 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2074 * @param pCtx The guest-CPU context.
2075 *
2076 * @remarks This does -not- take into account the global-interrupt flag.
2077 */
2078VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2079{
2080 /** @todo Optimization: Avoid this function call and use a pointer to the
2081 * relevant eflags instead (set up during VMRUN instruction emulation). */
2082 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2083
2084 X86EFLAGS fEFlags;
2085 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2086 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2087 else
2088 fEFlags.u = pCtx->eflags.u;
2089
2090 return fEFlags.Bits.u1IF;
2091}
2092
2093
2094/**
2095 * Checks whether the SVM nested-guest is in a state to receive virtual
2096 * interrupts (set up for injection by the VMRUN instruction).
2097 *
2098 * @returns @c true if it's ready, @c false otherwise.
2100 *
2101 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2102 * @param pCtx The guest-CPU context.
2103 */
2104VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2105{
2106 RT_NOREF(pVCpu);
2107 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2108
2109 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2110 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2111 Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing the virtual-GIF feature to the guest yet. */
2112 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2113 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2114 return false;
2115
2116 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2117}
2118
2119
2120/**
2121 * Gets the pending SVM nested-guest interrupt vector.
2122 *
2123 * @returns The nested-guest interrupt to inject.
2124 * @param pCtx The guest-CPU context.
2125 */
2126VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
2127{
2128 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2129 return pVmcbCtrl->IntCtrl.n.u8VIntrVector;
2130}
2131
2132
2133/**
2134 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2135 *
2136 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2137 * @param pCtx The guest-CPU context.
2138 */
2139VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx)
2140{
2141 /*
2142 * Reload the guest's "host state".
2143 */
2144 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2145 pCtx->es = pHostState->es;
2146 pCtx->cs = pHostState->cs;
2147 pCtx->ss = pHostState->ss;
2148 pCtx->ds = pHostState->ds;
2149 pCtx->gdtr = pHostState->gdtr;
2150 pCtx->idtr = pHostState->idtr;
2151 CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2152 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2153 pCtx->cr3 = pHostState->uCr3;
2154 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2155 pCtx->rflags = pHostState->rflags;
2156 pCtx->rflags.Bits.u1VM = 0;
2157 pCtx->rip = pHostState->uRip;
2158 pCtx->rsp = pHostState->uRsp;
2159 pCtx->rax = pHostState->uRax;
2160 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2161 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2162 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2163
2164 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2165 * raise \#GP(0) in the guest. */
2166
2167 /** @todo check the loaded host-state for consistency. Figure out what
2168 * exactly this involves. */
2169}
2170
2171
2172/**
2173 * Saves the host-state to the host-state save area as part of a VMRUN.
2174 *
2175 * @param pCtx The guest-CPU context.
2176 * @param cbInstr The length of the VMRUN instruction in bytes.
2177 */
2178VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2179{
2180 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2181 pHostState->es = pCtx->es;
2182 pHostState->cs = pCtx->cs;
2183 pHostState->ss = pCtx->ss;
2184 pHostState->ds = pCtx->ds;
2185 pHostState->gdtr = pCtx->gdtr;
2186 pHostState->idtr = pCtx->idtr;
2187 pHostState->uEferMsr = pCtx->msrEFER;
2188 pHostState->uCr0 = pCtx->cr0;
2189 pHostState->uCr3 = pCtx->cr3;
2190 pHostState->uCr4 = pCtx->cr4;
2191 pHostState->rflags = pCtx->rflags;
2192 pHostState->uRip = pCtx->rip + cbInstr;
2193 pHostState->uRsp = pCtx->rsp;
2194 pHostState->uRax = pCtx->rax;
2195}
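
#if 0 /* Illustrative sketch, not part of the original file. */
/*
 * Example of how the two helpers above pair up during nested SVM emulation:
 * VMRUN stashes the outer guest's state and a later \#VMEXIT restores it.
 * All the emulation steps in between are elided here.
 */
static void cpumExampleSvmVmRunAndExit(PVMCPUCC pVCpu, PCPUMCTX pCtx, uint8_t cbVmRunInstr)
{
    CPUMSvmVmRunSaveHostState(pCtx, cbVmRunInstr);
    /* ... run the nested-guest until an intercept is hit ... */
    CPUMSvmVmExitRestoreHostState(pVCpu, pCtx);
}
#endif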
2196
2197
2198/**
2199 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
2200 * nested-guest.
2201 *
2202 * @returns The TSC offset after applying any nested-guest TSC offset.
2203 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2204 * @param uTscValue The guest TSC.
2205 *
2206 * @sa CPUMRemoveNestedGuestTscOffset.
2207 */
2208VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2209{
2210 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2211 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2212 {
2213 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2214 Assert(pVmcs);
2215 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2216 return uTscValue + pVmcs->u64TscOffset.u;
2217 return uTscValue;
2218 }
2219
2220 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2221 {
2222 uint64_t offTsc;
2223 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2224 {
2225 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2226 Assert(pVmcb);
2227 offTsc = pVmcb->ctrl.u64TSCOffset;
2228 }
2229 return uTscValue + offTsc;
2230 }
2231 return uTscValue;
2232}
2233
2234
2235/**
2236 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
2237 * guest.
2238 *
2239 * @returns The TSC offset after removing any nested-guest TSC offset.
2240 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2241 * @param uTscValue The nested-guest TSC.
2242 *
2243 * @sa CPUMApplyNestedGuestTscOffset.
2244 */
2245VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2246{
2247 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2248 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2249 {
2250 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2251 {
2252 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2253 Assert(pVmcs);
2254 return uTscValue - pVmcs->u64TscOffset.u;
2255 }
2256 return uTscValue;
2257 }
2258
2259 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2260 {
2261 uint64_t offTsc;
2262 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2263 {
2264 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2265 Assert(pVmcb);
2266 offTsc = pVmcb->ctrl.u64TSCOffset;
2267 }
2268 return uTscValue - offTsc;
2269 }
2270 return uTscValue;
2271}
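
#if 0 /* Illustrative sketch, not part of the original file. */
/*
 * Example of the round-trip property of the two helpers above: as long as
 * the nested-guest TSC offset does not change in between, removing the
 * offset undoes applying it.  The helper name is an assumption.
 */
static void cpumExampleTscOffsetRoundTrip(PCVMCPU pVCpu, uint64_t uGuestTsc)
{
    uint64_t const uNstGstTsc = CPUMApplyNestedGuestTscOffset(pVCpu, uGuestTsc);
    Assert(CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc) == uGuestTsc);
}
#endif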
2272
2273
2274/**
2275 * Used to dynamically import state residing in NEM or HM.
2276 *
2277 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
2278 *
2279 * @returns VBox status code.
2280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2281 * @param fExtrnImport The fields to import.
2282 * @thread EMT(pVCpu)
2283 */
2284VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport)
2285{
2286 VMCPU_ASSERT_EMT(pVCpu);
2287 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
2288 {
2289 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
2290 {
2291 case CPUMCTX_EXTRN_KEEPER_NEM:
2292 {
2293 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
2294 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2295 return rc;
2296 }
2297
2298 case CPUMCTX_EXTRN_KEEPER_HM:
2299 {
2300#ifdef IN_RING0
2301 int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
2302 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2303 return rc;
2304#else
2305 AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
2306 return VINF_SUCCESS;
2307#endif
2308 }
2309 default:
2310 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
2311 }
2312 }
2313 return VINF_SUCCESS;
2314}
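
#if 0 /* Illustrative sketch, not part of the original file. */
/*
 * Example of making sure a piece of guest state is present before touching
 * it.  Real callers normally go through the CPUM_IMPORT_EXTRN_RET() macro
 * rather than calling the worker directly; this expansion is illustration
 * only and the helper name is an assumption.
 */
static int cpumExampleEnsureCr0(PVMCPUCC pVCpu)
{
    if (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_CR0)
    {
        int rc = CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_CR0);
        if (RT_FAILURE(rc))
            return rc;
    }
    /* pVCpu->cpum.s.Guest.cr0 can now be read safely. */
    return VINF_SUCCESS;
}
#endif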
2315
2316
2317/**
2318 * Gets valid CR4 bits for the guest.
2319 *
2320 * @returns Valid CR4 bits.
2321 * @param pVM The cross context VM structure.
2322 */
2323VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
2324{
2325 PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
2326 uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
2327 | X86_CR4_TSD | X86_CR4_DE
2328 | X86_CR4_PSE | X86_CR4_PAE
2329 | X86_CR4_MCE | X86_CR4_PGE
2330 | X86_CR4_PCE
2331 | X86_CR4_OSXMMEEXCPT; /** @todo r=ramshankar: Introduced in Pentium III along with SSE. Check fSse here? */
2332 if (pGuestFeatures->fFxSaveRstor)
2333 fMask |= X86_CR4_OSFXSR;
2334 if (pGuestFeatures->fVmx)
2335 fMask |= X86_CR4_VMXE;
2336 if (pGuestFeatures->fXSaveRstor)
2337 fMask |= X86_CR4_OSXSAVE;
2338 if (pGuestFeatures->fPcid)
2339 fMask |= X86_CR4_PCIDE;
2340 if (pGuestFeatures->fFsGsBase)
2341 fMask |= X86_CR4_FSGSBASE;
2342 return fMask;
2343}
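
#if 0 /* Illustrative sketch, not part of the original file. */
/*
 * Example of using the valid-bits mask above when emulating a MOV to CR4:
 * attempting to set any bit outside the mask must raise \#GP(0).  The helper
 * name and returning a bool (instead of raising the exception) are
 * assumptions.
 */
static bool cpumExampleIsCr4WriteValid(PVM pVM, uint64_t uNewCr4)
{
    uint64_t const fValidMask = CPUMGetGuestCR4ValidMask(pVM);
    return !(uNewCr4 & ~fValidMask);
}
#endif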
2344
2345
2346/**
2347 * Gets the read and write permission bits for an MSR in an MSR bitmap.
2348 *
2349 * @returns VMXMSRPM_XXX - the MSR permission.
2350 * @param pvMsrBitmap Pointer to the MSR bitmap.
2351 * @param idMsr The MSR to get permissions for.
2352 *
2353 * @sa hmR0VmxSetMsrPermission.
2354 */
2355VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
2356{
2357 AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
2358
2359 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
2360
2361 /*
2362 * MSR Layout:
2363 * Byte index MSR range Interpreted as
2364 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
2365 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
2366 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
2367 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
2368 *
2369 * A bit corresponding to an MSR within the above ranges causes a VM-exit
2370 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls outside
2371 * these ranges, it always causes a VM-exit.
2372 *
2373 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
2374 */
2375 uint32_t const offBitmapRead = 0;
2376 uint32_t const offBitmapWrite = 0x800;
2377 uint32_t offMsr;
2378 uint32_t iBit;
2379 if (idMsr <= UINT32_C(0x00001fff))
2380 {
2381 offMsr = 0;
2382 iBit = idMsr;
2383 }
2384 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
2385 {
2386 offMsr = 0x400;
2387 iBit = idMsr - UINT32_C(0xc0000000);
2388 }
2389 else
2390 {
2391 LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
2392 return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
2393 }
2394
2395 /*
2396 * Get the MSR read permissions.
2397 */
2398 uint32_t fRet;
2399 uint32_t const offMsrRead = offBitmapRead + offMsr;
2400 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
2401 if (ASMBitTest(pbMsrBitmap + offMsrRead, iBit))
2402 fRet = VMXMSRPM_EXIT_RD;
2403 else
2404 fRet = VMXMSRPM_ALLOW_RD;
2405
2406 /*
2407 * Get the MSR write permissions.
2408 */
2409 uint32_t const offMsrWrite = offBitmapWrite + offMsr;
2410 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
2411 if (ASMBitTest(pbMsrBitmap + offMsrWrite, iBit))
2412 fRet |= VMXMSRPM_EXIT_WR;
2413 else
2414 fRet |= VMXMSRPM_ALLOW_WR;
2415
2416 Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
2417 return fRet;
2418}
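
#if 0 /* Illustrative sketch, not part of the original file. */
/*
 * Worked example for the bitmap layout documented above, using MSR_K6_EFER
 * (0xc0000080) purely as a sample value: it falls in the high range, so
 * offMsr = 0x400 and iBit = 0x80, i.e. the read bit lives in byte 0x410 and
 * the write bit in byte 0xc10 of the 4K bitmap page.
 */
static void cpumExampleVmxMsrPermission(void const *pvMsrBitmap)
{
    uint32_t const fPerm = CPUMGetVmxMsrPermission(pvMsrBitmap, MSR_K6_EFER);
    if (fPerm & VMXMSRPM_EXIT_RD)
        Log(("RDMSR of EFER causes a VM-exit\n"));
    if (fPerm & VMXMSRPM_EXIT_WR)
        Log(("WRMSR of EFER causes a VM-exit\n"));
}
#endif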
2419
2420
2421/**
2422 * Checks the permission bits for the specified I/O port from the given I/O bitmap
2423 * to see if it causes a VM-exit.
2424 *
2425 * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
2426 * @param pvIoBitmap Pointer to I/O bitmap.
2427 * @param uPort The I/O port being accessed.
2428 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2429 */
2430static bool cpumGetVmxIoBitmapPermission(void const *pvIoBitmap, uint16_t uPort, uint8_t cbAccess)
2431{
2432 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
2433
2434 /*
2435 * If the I/O port access wraps around the 16-bit port I/O space, we must cause a
2436 * VM-exit.
2437 *
2438 * Reads of 1, 2 and 4 bytes at ports 0xffff, 0xfffe and 0xfffc respectively are
2439 * valid and do not constitute a wrap around. However, reading 2 bytes at port
2440 * 0xffff, or 4 bytes at port 0xffff, 0xfffe or 0xfffd, wraps around. In other
2441 * words, any access that touches -both- port 0xffff and port 0 is a wrap around.
2442 *
2443 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2444 */
2445 uint32_t const uPortLast = uPort + cbAccess;
2446 if (uPortLast > 0x10000)
2447 return true;
2448
2449 /*
2450 * If any bit corresponding to the I/O access is set, we must cause a VM-exit.
2451 */
2452 uint8_t const *pbIoBitmap = (uint8_t const *)pvIoBitmap;
2453 uint16_t const offPerm = uPort >> 3; /* Byte offset of the port. */
2454 uint16_t const idxPermBit = uPort - (offPerm << 3); /* Bit offset within byte. */
2455 Assert(idxPermBit < 8);
2456 static const uint8_t s_afMask[] = { 0x0, 0x1, 0x3, 0x7, 0xf }; /* Bit-mask for all access sizes. */
2457 uint16_t const fMask = s_afMask[cbAccess] << idxPermBit; /* Bit-mask of the access. */
2458
2459 /* Fetch 8 or 16-bits depending on whether the access spans 8-bit boundary. */
2460 RTUINT16U uPerm;
2461 uPerm.s.Lo = *(pbIoBitmap + offPerm);
2462 if (idxPermBit + cbAccess > 8)
2463 uPerm.s.Hi = *(pbIoBitmap + 1 + offPerm);
2464 else
2465 uPerm.s.Hi = 0;
2466
2467 /* If any bit for the access is 1, we must cause a VM-exit. */
2468 if (uPerm.u & fMask)
2469 return true;
2470
2471 return false;
2472}
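
#if 0 /* Illustrative sketch, not part of the original file. */
/*
 * Worked examples for the wrap-around rule above: a 2-byte access at port
 * 0xfffe ends exactly at the top of the port space (0xfffe + 2 == 0x10000)
 * and is decided by the bitmap, while a 2-byte access at port 0xffff wraps
 * (0xffff + 2 > 0x10000) and therefore always causes a VM-exit.
 */
static void cpumExampleVmxIoWrapAround(void const *pvIoBitmap)
{
    bool const fExitNoWrap = cpumGetVmxIoBitmapPermission(pvIoBitmap, 0xfffe, 2); /* bitmap decides */
    bool const fExitWrap   = cpumGetVmxIoBitmapPermission(pvIoBitmap, 0xffff, 2); /* always true */
    Assert(fExitWrap);
    RT_NOREF2(fExitNoWrap, fExitWrap);
}
#endif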
2473
2474
2475/**
2476 * Returns whether the given VMCS field is valid and supported for the guest.
2477 *
2478 * @param pVM The cross context VM structure.
2479 * @param u64VmcsField The VMCS field.
2480 *
2481 * @remarks This takes into account the CPU features exposed to the guest.
2482 */
2483VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField)
2484{
2485 uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
2486 uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
2487 if (!uFieldEncHi)
2488 { /* likely */ }
2489 else
2490 return false;
2491
2492 PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
2493 switch (uFieldEncLo)
2494 {
2495 /*
2496 * 16-bit fields.
2497 */
2498 /* Control fields. */
2499 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
2500 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
2501 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
2502
2503 /* Guest-state fields. */
2504 case VMX_VMCS16_GUEST_ES_SEL:
2505 case VMX_VMCS16_GUEST_CS_SEL:
2506 case VMX_VMCS16_GUEST_SS_SEL:
2507 case VMX_VMCS16_GUEST_DS_SEL:
2508 case VMX_VMCS16_GUEST_FS_SEL:
2509 case VMX_VMCS16_GUEST_GS_SEL:
2510 case VMX_VMCS16_GUEST_LDTR_SEL:
2511 case VMX_VMCS16_GUEST_TR_SEL: return true;
2512 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
2513 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
2514
2515 /* Host-state fields. */
2516 case VMX_VMCS16_HOST_ES_SEL:
2517 case VMX_VMCS16_HOST_CS_SEL:
2518 case VMX_VMCS16_HOST_SS_SEL:
2519 case VMX_VMCS16_HOST_DS_SEL:
2520 case VMX_VMCS16_HOST_FS_SEL:
2521 case VMX_VMCS16_HOST_GS_SEL:
2522 case VMX_VMCS16_HOST_TR_SEL: return true;
2523
2524 /*
2525 * 64-bit fields.
2526 */
2527 /* Control fields. */
2528 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
2529 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
2530 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
2531 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
2532 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
2533 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
2534 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
2535 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
2536 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
2537 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
2538 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
2539 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
2540 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
2541 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
2542 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
2543 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
2544 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
2545 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
2546 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
2547 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
2548 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
2549 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
2550 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
2551 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
2552 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
2553 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
2554 case VMX_VMCS64_CTRL_EPTP_FULL:
2555 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
2556 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
2557 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
2558 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
2559 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
2560 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
2561 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
2562 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
2563 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
2564 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
2565 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
2566 {
2567 PCVMCPU pVCpu = pVM->CTX_SUFF(apCpus)[0];
2568 uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
2569 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
2570 }
2571 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
2572 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
2573 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
2574 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
2575 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
2576 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
2577 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
2578 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
2579 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
2580 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
2581 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
2582 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
2583
2584 /* Read-only data fields. */
2585 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
2586 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
2587
2588 /* Guest-state fields. */
2589 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
2590 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
2591 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
2592 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
2593 case VMX_VMCS64_GUEST_PAT_FULL:
2594 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
2595 case VMX_VMCS64_GUEST_EFER_FULL:
2596 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
2597 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
2598 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
2599 case VMX_VMCS64_GUEST_PDPTE0_FULL:
2600 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
2601 case VMX_VMCS64_GUEST_PDPTE1_FULL:
2602 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
2603 case VMX_VMCS64_GUEST_PDPTE2_FULL:
2604 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
2605 case VMX_VMCS64_GUEST_PDPTE3_FULL:
2606 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
2607 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
2608 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
2609
2610 /* Host-state fields. */
2611 case VMX_VMCS64_HOST_PAT_FULL:
2612 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
2613 case VMX_VMCS64_HOST_EFER_FULL:
2614 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
2615 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
2616 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
2617
2618 /*
2619 * 32-bit fields.
2620 */
2621 /* Control fields. */
2622 case VMX_VMCS32_CTRL_PIN_EXEC:
2623 case VMX_VMCS32_CTRL_PROC_EXEC:
2624 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
2625 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
2626 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
2627 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
2628 case VMX_VMCS32_CTRL_EXIT:
2629 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
2630 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
2631 case VMX_VMCS32_CTRL_ENTRY:
2632 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
2633 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
2634 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
2635 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
2636 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
2637 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
2638 case VMX_VMCS32_CTRL_PLE_GAP:
2639 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
2640
2641 /* Read-only data fields. */
2642 case VMX_VMCS32_RO_VM_INSTR_ERROR:
2643 case VMX_VMCS32_RO_EXIT_REASON:
2644 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
2645 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
2646 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
2647 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
2648 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
2649 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
2650
2651 /* Guest-state fields. */
2652 case VMX_VMCS32_GUEST_ES_LIMIT:
2653 case VMX_VMCS32_GUEST_CS_LIMIT:
2654 case VMX_VMCS32_GUEST_SS_LIMIT:
2655 case VMX_VMCS32_GUEST_DS_LIMIT:
2656 case VMX_VMCS32_GUEST_FS_LIMIT:
2657 case VMX_VMCS32_GUEST_GS_LIMIT:
2658 case VMX_VMCS32_GUEST_LDTR_LIMIT:
2659 case VMX_VMCS32_GUEST_TR_LIMIT:
2660 case VMX_VMCS32_GUEST_GDTR_LIMIT:
2661 case VMX_VMCS32_GUEST_IDTR_LIMIT:
2662 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
2663 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
2664 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
2665 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
2666 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
2667 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
2668 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
2669 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
2670 case VMX_VMCS32_GUEST_INT_STATE:
2671 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
2672 case VMX_VMCS32_GUEST_SMBASE:
2673 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
2674 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
2675
2676 /* Host-state fields. */
2677 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
2678
2679 /*
2680 * Natural-width fields.
2681 */
2682 /* Control fields. */
2683 case VMX_VMCS_CTRL_CR0_MASK:
2684 case VMX_VMCS_CTRL_CR4_MASK:
2685 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
2686 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
2687 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
2688 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
2689 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
2690 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
2691
2692 /* Read-only data fields. */
2693 case VMX_VMCS_RO_EXIT_QUALIFICATION:
2694 case VMX_VMCS_RO_IO_RCX:
2695 case VMX_VMCS_RO_IO_RSI:
2696 case VMX_VMCS_RO_IO_RDI:
2697 case VMX_VMCS_RO_IO_RIP:
2698 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
2699
2700 /* Guest-state fields. */
2701 case VMX_VMCS_GUEST_CR0:
2702 case VMX_VMCS_GUEST_CR3:
2703 case VMX_VMCS_GUEST_CR4:
2704 case VMX_VMCS_GUEST_ES_BASE:
2705 case VMX_VMCS_GUEST_CS_BASE:
2706 case VMX_VMCS_GUEST_SS_BASE:
2707 case VMX_VMCS_GUEST_DS_BASE:
2708 case VMX_VMCS_GUEST_FS_BASE:
2709 case VMX_VMCS_GUEST_GS_BASE:
2710 case VMX_VMCS_GUEST_LDTR_BASE:
2711 case VMX_VMCS_GUEST_TR_BASE:
2712 case VMX_VMCS_GUEST_GDTR_BASE:
2713 case VMX_VMCS_GUEST_IDTR_BASE:
2714 case VMX_VMCS_GUEST_DR7:
2715 case VMX_VMCS_GUEST_RSP:
2716 case VMX_VMCS_GUEST_RIP:
2717 case VMX_VMCS_GUEST_RFLAGS:
2718 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
2719 case VMX_VMCS_GUEST_SYSENTER_ESP:
2720 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
2721
2722 /* Host-state fields. */
2723 case VMX_VMCS_HOST_CR0:
2724 case VMX_VMCS_HOST_CR3:
2725 case VMX_VMCS_HOST_CR4:
2726 case VMX_VMCS_HOST_FS_BASE:
2727 case VMX_VMCS_HOST_GS_BASE:
2728 case VMX_VMCS_HOST_TR_BASE:
2729 case VMX_VMCS_HOST_GDTR_BASE:
2730 case VMX_VMCS_HOST_IDTR_BASE:
2731 case VMX_VMCS_HOST_SYSENTER_ESP:
2732 case VMX_VMCS_HOST_SYSENTER_EIP:
2733 case VMX_VMCS_HOST_RSP:
2734 case VMX_VMCS_HOST_RIP: return true;
2735 }
2736
2737 return false;
2738}
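
#if 0 /* Illustrative sketch, not part of the original file. */
/*
 * Example of how VMREAD/VMWRITE emulation might consult the check above
 * before touching a field.  Returning a bool and logging here is a stand-in
 * for signalling the proper VM-instruction error in the real emulation.
 */
static bool cpumExampleCheckVmcsField(PVMCC pVM, uint64_t u64VmcsField)
{
    if (CPUMIsGuestVmxVmcsFieldValid(pVM, u64VmcsField))
        return true;
    Log(("Unsupported VMCS field %#RX64\n", u64VmcsField));
    return false;
}
#endif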
2739
2740
2741/**
2742 * Checks whether the given I/O access should cause a nested-guest VM-exit.
2743 *
2744 * @returns @c true if it causes a VM-exit, @c false otherwise.
2745 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2746 * @param u16Port The I/O port being accessed.
2747 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2748 */
2749VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
2750{
2751 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2752 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
2753 return true;
2754
2755 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
2756 {
2757 uint8_t const *pbIoBitmap = (uint8_t const *)pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap);
2758 Assert(pbIoBitmap);
2759 return cpumGetVmxIoBitmapPermission(pbIoBitmap, u16Port, cbAccess);
2760 }
2761
2762 return false;
2763}
2764
2765
2766/**
2767 * Checks whether the Mov-to-CR3 instruction causes a nested-guest VM-exit.
2768 *
2769 * @returns @c true if it causes a VM-exit, @c false otherwise.
2770 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2771 * @param uNewCr3 The CR3 value being written.
2772 */
2773VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
2774{
2775 /*
2776 * If the CR3-load exiting control is set and the new CR3 value does not
2777 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
2778 *
2779 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2780 */
2781 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2782 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2783 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
2784 {
2785 uint32_t const uCr3TargetCount = pVmcs->u32Cr3TargetCount;
2786 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
2787
2788 /* If the CR3-target count is 0, cause a VM-exit. */
2789 if (uCr3TargetCount == 0)
2790 return true;
2791
2792 /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
2793 AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
2794 if ( uNewCr3 != pVmcs->u64Cr3Target0.u
2795 && uNewCr3 != pVmcs->u64Cr3Target1.u
2796 && uNewCr3 != pVmcs->u64Cr3Target2.u
2797 && uNewCr3 != pVmcs->u64Cr3Target3.u)
2798 return true;
2799 }
2800 return false;
2801}
2802
2803
2804/**
2805 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
2806 * VM-exit or not.
2807 *
2808 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
2809 * @param pVCpu The cross context virtual CPU structure.
2810 * @param uExitReason The VM-exit reason (VMX_EXIT_VMREAD or
2811 * VMX_EXIT_VMWRITE).
2812 * @param u64VmcsField The VMCS field.
2813 */
2814VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
2815{
2816 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
2817 Assert( uExitReason == VMX_EXIT_VMREAD
2818 || uExitReason == VMX_EXIT_VMWRITE);
2819
2820 /*
2821 * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
2822 */
2823 if (!CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VMCS_SHADOWING))
2824 return true;
2825
2826 /*
2827 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
2828 * is intercepted. This excludes any reserved bits in the valid parts of the field
2829 * encoding (i.e. bit 12).
2830 */
2831 if (u64VmcsField & VMX_VMCSFIELD_RSVD_MASK)
2832 return true;
2833
2834 /*
2835 * Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not.
2836 */
2837 uint32_t const u32VmcsField = RT_LO_U32(u64VmcsField);
2838 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
2839 ? (uint8_t const *)pVCpu->cpum.s.Guest.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
2840 : (uint8_t const *)pVCpu->cpum.s.Guest.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
2841 Assert(pbBitmap);
2842 Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
2843 return ASMBitTest(pbBitmap + (u32VmcsField >> 3), u32VmcsField & 7);
2844}
2845
2846
2847
2848/**
2849 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
2850 *
2851 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
2852 * @param u16Port The IO port being accessed.
2853 * @param enmIoType The type of IO access.
2854 * @param cbReg The IO operand size in bytes.
2855 * @param cAddrSizeBits The address size bits (16, 32 or 64).
2856 * @param iEffSeg The effective segment number.
2857 * @param fRep Whether this is a repeating IO instruction (REP prefix).
2858 * @param fStrIo Whether this is a string IO instruction.
2859 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO struct to be filled.
2860 * Optional, can be NULL.
2861 */
2862VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
2863 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
2864 PSVMIOIOEXITINFO pIoExitInfo)
2865{
2866 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
2867 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
2868
2869 /*
2870 * The IOPM layout:
2871 * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
2872 * two 4K pages.
2873 *
2874 * For IO instructions that access more than a single byte, the permission bits
2875 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
2876 *
2877 * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
2878 * we need 3 extra bits beyond the second 4K page.
2879 */
2880 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
2881
2882 uint16_t const offIopm = u16Port >> 3;
2883 uint16_t const fSizeMask = s_auSizeMasks[cbReg & 7];
2884 uint8_t const cShift = u16Port - (offIopm << 3);
2885 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
2886
2887 uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
2888 Assert(pbIopm);
2889 pbIopm += offIopm;
2890 uint16_t const u16Iopm = *(uint16_t const *)pbIopm;
2891 if (u16Iopm & fIopmMask)
2892 {
2893 if (pIoExitInfo)
2894 {
2895 static const uint32_t s_auIoOpSize[] =
2896 { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
2897
2898 static const uint32_t s_auIoAddrSize[] =
2899 { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
2900
2901 pIoExitInfo->u = s_auIoOpSize[cbReg & 7];
2902 pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
2903 pIoExitInfo->n.u1Str = fStrIo;
2904 pIoExitInfo->n.u1Rep = fRep;
2905 pIoExitInfo->n.u3Seg = iEffSeg & 7;
2906 pIoExitInfo->n.u1Type = enmIoType;
2907 pIoExitInfo->n.u16Port = u16Port;
2908 }
2909 return true;
2910 }
2911
2912 /** @todo remove later (for debugging as VirtualBox always traps all IO
2913 * intercepts). */
2914 AssertMsgFailed(("CPUMIsSvmIoInterceptSet: We expect an IO intercept here!\n"));
2915 return false;
2916}
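
#if 0 /* Illustrative sketch, not part of the original file. */
/*
 * Worked example for the IOPM layout above, using a 4-byte OUT to port
 * 0x3f8 purely as a sample: offIopm = 0x3f8 / 8 = 0x7f, cShift = 0, and the
 * mask covers bits 0..3, i.e. the permission bits of ports 0x3f8..0x3fb.
 */
static void cpumExampleSvmIoIntercept(void *pvIoBitmap)
{
    SVMIOIOEXITINFO IoExitInfo;
    if (CPUMIsSvmIoInterceptSet(pvIoBitmap, 0x3f8 /* u16Port */, SVMIOIOTYPE_OUT, 4 /* cbReg */,
                                32 /* cAddrSizeBits */, 0 /* iEffSeg */, false /* fRep */,
                                false /* fStrIo */, &IoExitInfo))
        Log(("OUT to port %#x causes a #VMEXIT\n", IoExitInfo.n.u16Port));
}
#endif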
2917
2918
2919/**
2920 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
2921 *
2922 * @returns VBox status code.
2923 * @param idMsr The MSR being requested.
2924 * @param pbOffMsrpm Where to store the byte offset in the MSR permission
2925 * bitmap for @a idMsr.
2926 * @param puMsrpmBit Where to store the bit offset starting at the byte
2927 * returned in @a pbOffMsrpm.
2928 */
2929VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
2930{
2931 Assert(pbOffMsrpm);
2932 Assert(puMsrpmBit);
2933
2934 /*
2935 * MSRPM Layout:
2936 * Byte offset MSR range
2937 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
2938 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
2939 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
2940 * 0x1800 - 0x1fff Reserved
2941 *
2942 * Each MSR is represented by 2 permission bits (read and write).
2943 */
2944 if (idMsr <= 0x00001fff)
2945 {
2946 /* Pentium-compatible MSRs. */
2947 uint32_t const bitoffMsr = idMsr << 1;
2948 *pbOffMsrpm = bitoffMsr >> 3;
2949 *puMsrpmBit = bitoffMsr & 7;
2950 return VINF_SUCCESS;
2951 }
2952
2953 if ( idMsr >= 0xc0000000
2954 && idMsr <= 0xc0001fff)
2955 {
2956 /* AMD Sixth Generation x86 Processor MSRs. */
2957 uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
2958 *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
2959 *puMsrpmBit = bitoffMsr & 7;
2960 return VINF_SUCCESS;
2961 }
2962
2963 if ( idMsr >= 0xc0010000
2964 && idMsr <= 0xc0011fff)
2965 {
2966 /* AMD Seventh and Eighth Generation Processor MSRs. */
2967 uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
2968 *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
2969 *puMsrpmBit = bitoffMsr & 7;
2970 return VINF_SUCCESS;
2971 }
2972
2973 *pbOffMsrpm = 0;
2974 *puMsrpmBit = 0;
2975 return VERR_OUT_OF_RANGE;
2976}
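
#if 0 /* Illustrative sketch, not part of the original file. */
/*
 * Worked example for the MSRPM layout above, again using MSR_K6_EFER
 * (0xc0000080) as a sample: bitoffMsr = 0x80 * 2 = 0x100, so its permission
 * bits live in byte 0x800 + 0x20 = 0x820, read bit at bit 0 and write bit
 * at bit 1.
 */
static void cpumExampleSvmMsrpmOffset(void)
{
    uint16_t offMsrpm  = 0;
    uint8_t  uMsrpmBit = 0;
    int const rc = CPUMGetSvmMsrpmOffsetAndBit(MSR_K6_EFER, &offMsrpm, &uMsrpmBit);
    Assert(rc == VINF_SUCCESS && offMsrpm == 0x820 && uMsrpmBit == 0);
    RT_NOREF3(rc, offMsrpm, uMsrpmBit);
}
#endif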
2977