VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp @ 80293

Last change on this file was 80293, checked in by vboxsync, 6 years ago

VMM(CPUM),DevPcBios: Added CPUM methods for getting the guest (and host) microarchitecture so DevPcBios doesn't need to include vm.h. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 96.0 KB
1/* $Id: CPUMAllRegs.cpp 80293 2019-08-15 15:38:39Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_BUGREF_9217_PART_I
23#define LOG_GROUP LOG_GROUP_CPUM
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/apic.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#include <VBox/vmm/nem.h>
31#include <VBox/vmm/hm.h>
32#include "CPUMInternal.h"
33#include <VBox/vmm/vmcc.h>
34#include <VBox/err.h>
35#include <VBox/dis.h>
36#include <VBox/log.h>
37#include <VBox/vmm/hm.h>
38#include <VBox/vmm/tm.h>
39#include <iprt/assert.h>
40#include <iprt/asm.h>
41#include <iprt/asm-amd64-x86.h>
42#ifdef IN_RING3
43# include <iprt/thread.h>
44#endif
45
46/** Disable stack frame pointer generation here. */
47#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
48# pragma optimize("y", off)
49#endif
50
51AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
52AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
53
54
55/*********************************************************************************************************************************
56* Defined Constants And Macros *
57*********************************************************************************************************************************/
58/**
59 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
60 *
61 * @returns Pointer to the Virtual CPU.
62 * @param a_pGuestCtx Pointer to the guest context.
63 */
64#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
65
66/**
67 * Lazily loads the hidden parts of a selector register when using raw-mode.
68 */
69#define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
70 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg))
71
72/** @def CPUM_INT_ASSERT_NOT_EXTRN
73 * Macro for asserting that @a a_fNotExtrn are present.
74 *
75 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
76 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
77 */
78#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
79 AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
80 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
81
82
83VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
84{
85 pVCpu->cpum.s.Hyper.cr3 = cr3;
86}
87
88VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
89{
90 return pVCpu->cpum.s.Hyper.cr3;
91}
92
93
94/** @def MAYBE_LOAD_DRx
95 * Macro for updating DRx values in raw-mode and ring-0 contexts.
96 */
97#ifdef IN_RING0
98# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { a_fnLoad(a_uValue); } while (0)
99#else
100# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
101#endif
102
103VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
104{
105 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
106 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
107}
108
109
110VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
111{
112 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
113 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
114}
115
116
117VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
118{
119 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
120 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
121}
122
123
124VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
125{
126 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
127 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
128}
129
130
131VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
132{
133 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
134}
135
136
137VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
138{
139 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
140}
141
142
143VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
144{
145 return pVCpu->cpum.s.Hyper.dr[0];
146}
147
148
149VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
150{
151 return pVCpu->cpum.s.Hyper.dr[1];
152}
153
154
155VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
156{
157 return pVCpu->cpum.s.Hyper.dr[2];
158}
159
160
161VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
162{
163 return pVCpu->cpum.s.Hyper.dr[3];
164}
165
166
167VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
168{
169 return pVCpu->cpum.s.Hyper.dr[6];
170}
171
172
173VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
174{
175 return pVCpu->cpum.s.Hyper.dr[7];
176}
177
178
179/**
180 * Gets the pointer to the internal CPUMCTXCORE structure.
181 * This is only for reading in order to save a few calls.
182 *
183 * @param pVCpu The cross context virtual CPU structure.
184 */
185VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
186{
187 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
188}
189
190
191/**
192 * Queries the pointer to the internal CPUMCTX structure.
193 *
194 * @returns The CPUMCTX pointer.
195 * @param pVCpu The cross context virtual CPU structure.
196 */
197VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
198{
199 return &pVCpu->cpum.s.Guest;
200}
201
202
203/**
204 * Queries the pointer to the internal CPUMCTXMSRS structure.
205 *
206 * This is for NEM only.
207 *
208 * @returns The CPUMCTX pointer.
209 * @param pVCpu The cross context virtual CPU structure.
210 */
211VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
212{
213 return &pVCpu->cpum.s.GuestMsrs;
214}
215
216
217VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
218{
219 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
220 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
221 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
222 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
223 return VINF_SUCCESS; /* formality, consider it void. */
224}
225
226
227VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
228{
229 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
230 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
231 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
232 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
233 return VINF_SUCCESS; /* formality, consider it void. */
234}
235
236
237VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
238{
239 pVCpu->cpum.s.Guest.tr.Sel = tr;
240 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
241 return VINF_SUCCESS; /* formality, consider it void. */
242}
243
244
245VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
246{
247 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
248 /* The caller will set more hidden bits if it has them. */
249 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
250 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
251 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
252 return VINF_SUCCESS; /* formality, consider it void. */
253}
254
255
256/**
257 * Set the guest CR0.
258 *
259 * When called in GC, the hyper CR0 may be updated if that is
260 * required. The caller only has to take special action if AM,
261 * WP, PG or PE changes.
262 *
263 * @returns VINF_SUCCESS (consider it void).
264 * @param pVCpu The cross context virtual CPU structure.
265 * @param cr0 The new CR0 value.
266 */
267VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0)
268{
269 /*
270 * Check for changes causing TLB flushes (for REM).
271 * The caller is responsible for calling PGM when appropriate.
272 */
273 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
274 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
275 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
276 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
277
278 /*
279 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
280 */
281 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
282 PGMCr0WpEnabled(pVCpu);
283
284 /* The ET flag is settable on a 386 and hardwired on 486+. */
285 if ( !(cr0 & X86_CR0_ET)
286 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
287 cr0 |= X86_CR0_ET;
288
289 pVCpu->cpum.s.Guest.cr0 = cr0;
290 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
291 return VINF_SUCCESS;
292}
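
/*
 * A minimal illustrative sketch; the exampleSetGuestCr0WriteProtect helper is
 * hypothetical and not part of the sources. It shows a caller toggling CR0.WP
 * through the setter above: CPUM itself flags the TLB flush and notifies PGM
 * of a WP 0->1 transition, while the caller remains responsible for any
 * further PGM updates as documented for CPUMSetGuestCR0.
 */
static void exampleSetGuestCr0WriteProtect(PVMCPUCC pVCpu, bool fEnable)
{
    uint64_t const uCr0    = CPUMGetGuestCR0(pVCpu);
    uint64_t const uNewCr0 = fEnable ? uCr0 | X86_CR0_WP : uCr0 & ~(uint64_t)X86_CR0_WP;
    if (uNewCr0 != uCr0)
        CPUMSetGuestCR0(pVCpu, uNewCr0); /* Sets CPUM_CHANGED_CR0 (+ TLB flush flag if WP/PG/PE changed). */
}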
293
294
295VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
296{
297 pVCpu->cpum.s.Guest.cr2 = cr2;
298 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
299 return VINF_SUCCESS;
300}
301
302
303VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
304{
305 pVCpu->cpum.s.Guest.cr3 = cr3;
306 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
307 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
308 return VINF_SUCCESS;
309}
310
311
312VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
313{
314 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
315
316 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
317 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
318 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
319
320 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
321 pVCpu->cpum.s.Guest.cr4 = cr4;
322 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
323 return VINF_SUCCESS;
324}
325
326
327VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
328{
329 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
330 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
331 return VINF_SUCCESS;
332}
333
334
335VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
336{
337 pVCpu->cpum.s.Guest.eip = eip;
338 return VINF_SUCCESS;
339}
340
341
342VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
343{
344 pVCpu->cpum.s.Guest.eax = eax;
345 return VINF_SUCCESS;
346}
347
348
349VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
350{
351 pVCpu->cpum.s.Guest.ebx = ebx;
352 return VINF_SUCCESS;
353}
354
355
356VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
357{
358 pVCpu->cpum.s.Guest.ecx = ecx;
359 return VINF_SUCCESS;
360}
361
362
363VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
364{
365 pVCpu->cpum.s.Guest.edx = edx;
366 return VINF_SUCCESS;
367}
368
369
370VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
371{
372 pVCpu->cpum.s.Guest.esp = esp;
373 return VINF_SUCCESS;
374}
375
376
377VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
378{
379 pVCpu->cpum.s.Guest.ebp = ebp;
380 return VINF_SUCCESS;
381}
382
383
384VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
385{
386 pVCpu->cpum.s.Guest.esi = esi;
387 return VINF_SUCCESS;
388}
389
390
391VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
392{
393 pVCpu->cpum.s.Guest.edi = edi;
394 return VINF_SUCCESS;
395}
396
397
398VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
399{
400 pVCpu->cpum.s.Guest.ss.Sel = ss;
401 return VINF_SUCCESS;
402}
403
404
405VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
406{
407 pVCpu->cpum.s.Guest.cs.Sel = cs;
408 return VINF_SUCCESS;
409}
410
411
412VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
413{
414 pVCpu->cpum.s.Guest.ds.Sel = ds;
415 return VINF_SUCCESS;
416}
417
418
419VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
420{
421 pVCpu->cpum.s.Guest.es.Sel = es;
422 return VINF_SUCCESS;
423}
424
425
426VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
427{
428 pVCpu->cpum.s.Guest.fs.Sel = fs;
429 return VINF_SUCCESS;
430}
431
432
433VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
434{
435 pVCpu->cpum.s.Guest.gs.Sel = gs;
436 return VINF_SUCCESS;
437}
438
439
440VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
441{
442 pVCpu->cpum.s.Guest.msrEFER = val;
443 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
444}
445
446
447VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
448{
449 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
450 if (pcbLimit)
451 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
452 return pVCpu->cpum.s.Guest.idtr.pIdt;
453}
454
455
456VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
457{
458 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
459 if (pHidden)
460 *pHidden = pVCpu->cpum.s.Guest.tr;
461 return pVCpu->cpum.s.Guest.tr.Sel;
462}
463
464
465VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
466{
467 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
468 return pVCpu->cpum.s.Guest.cs.Sel;
469}
470
471
472VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
473{
474 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
475 return pVCpu->cpum.s.Guest.ds.Sel;
476}
477
478
479VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
480{
481 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
482 return pVCpu->cpum.s.Guest.es.Sel;
483}
484
485
486VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
487{
488 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
489 return pVCpu->cpum.s.Guest.fs.Sel;
490}
491
492
493VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
494{
495 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
496 return pVCpu->cpum.s.Guest.gs.Sel;
497}
498
499
500VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
501{
502 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
503 return pVCpu->cpum.s.Guest.ss.Sel;
504}
505
506
507VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
508{
509 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
510 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
511 if ( !CPUMIsGuestInLongMode(pVCpu)
512 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
513 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
514 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
515}
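
/*
 * Illustrative sketch; exampleLogGuestPC is hypothetical. Outside 64-bit code
 * the flat PC computed above is EIP plus the CS base truncated to 32 bits; in
 * 64-bit code the CS base is normally zero, so the result is simply RIP.
 */
static void exampleLogGuestPC(PVMCPU pVCpu)
{
    uint64_t const uFlatPC = CPUMGetGuestFlatPC(pVCpu);
    Log(("Guest flat PC: %#RX64\n", uFlatPC));
}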
516
517
518VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
519{
520 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
521 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
522 if ( !CPUMIsGuestInLongMode(pVCpu)
523 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
524 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
525 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.ss.u64Base;
526}
527
528
529VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
530{
531 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
532 return pVCpu->cpum.s.Guest.ldtr.Sel;
533}
534
535
536VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
537{
538 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
539 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
540 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
541 return pVCpu->cpum.s.Guest.ldtr.Sel;
542}
543
544
545VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
546{
547 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
548 return pVCpu->cpum.s.Guest.cr0;
549}
550
551
552VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
553{
554 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
555 return pVCpu->cpum.s.Guest.cr2;
556}
557
558
559VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
560{
561 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
562 return pVCpu->cpum.s.Guest.cr3;
563}
564
565
566VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
567{
568 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
569 return pVCpu->cpum.s.Guest.cr4;
570}
571
572
573VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu)
574{
575 uint64_t u64;
576 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
577 if (RT_FAILURE(rc))
578 u64 = 0;
579 return u64;
580}
581
582
583VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
584{
585 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
586 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
587}
588
589
590VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
591{
592 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
593 return pVCpu->cpum.s.Guest.eip;
594}
595
596
597VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
598{
599 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
600 return pVCpu->cpum.s.Guest.rip;
601}
602
603
604VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
605{
606 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
607 return pVCpu->cpum.s.Guest.eax;
608}
609
610
611VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
612{
613 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
614 return pVCpu->cpum.s.Guest.ebx;
615}
616
617
618VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
619{
620 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
621 return pVCpu->cpum.s.Guest.ecx;
622}
623
624
625VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
626{
627 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
628 return pVCpu->cpum.s.Guest.edx;
629}
630
631
632VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
633{
634 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
635 return pVCpu->cpum.s.Guest.esi;
636}
637
638
639VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
640{
641 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
642 return pVCpu->cpum.s.Guest.edi;
643}
644
645
646VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
647{
648 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
649 return pVCpu->cpum.s.Guest.esp;
650}
651
652
653VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
654{
655 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
656 return pVCpu->cpum.s.Guest.ebp;
657}
658
659
660VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
661{
662 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
663 return pVCpu->cpum.s.Guest.eflags.u32;
664}
665
666
667VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue)
668{
669 switch (iReg)
670 {
671 case DISCREG_CR0:
672 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
673 *pValue = pVCpu->cpum.s.Guest.cr0;
674 break;
675
676 case DISCREG_CR2:
677 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
678 *pValue = pVCpu->cpum.s.Guest.cr2;
679 break;
680
681 case DISCREG_CR3:
682 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
683 *pValue = pVCpu->cpum.s.Guest.cr3;
684 break;
685
686 case DISCREG_CR4:
687 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
688 *pValue = pVCpu->cpum.s.Guest.cr4;
689 break;
690
691 case DISCREG_CR8:
692 {
693 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
694 uint8_t u8Tpr;
695 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
696 if (RT_FAILURE(rc))
697 {
698 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
699 *pValue = 0;
700 return rc;
701 }
702 *pValue = u8Tpr >> 4; /* Bits 7-4 of the TPR map to CR8; bits 3-0 are not visible in CR8. */
703 break;
704 }
705
706 default:
707 return VERR_INVALID_PARAMETER;
708 }
709 return VINF_SUCCESS;
710}
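
/*
 * Illustrative sketch; exampleLogGuestControlRegs is hypothetical. It dumps
 * CR0-CR4 through the generic getter above. CR8 is special: it is synthesized
 * from APIC TPR bits 7-4 and can fail with VERR_PDM_NO_APIC_INSTANCE.
 */
static void exampleLogGuestControlRegs(PVMCPUCC pVCpu)
{
    static const unsigned s_aiRegs[] = { DISCREG_CR0, DISCREG_CR2, DISCREG_CR3, DISCREG_CR4 };
    for (unsigned i = 0; i < RT_ELEMENTS(s_aiRegs); i++)
    {
        uint64_t uValue = 0;
        int rc = CPUMGetGuestCRx(pVCpu, s_aiRegs[i], &uValue);
        Log(("iCrReg=%u value=%#RX64 rc=%Rrc\n", s_aiRegs[i], uValue, rc));
    }
}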
711
712
713VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
714{
715 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
716 return pVCpu->cpum.s.Guest.dr[0];
717}
718
719
720VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
721{
722 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
723 return pVCpu->cpum.s.Guest.dr[1];
724}
725
726
727VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
728{
729 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
730 return pVCpu->cpum.s.Guest.dr[2];
731}
732
733
734VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
735{
736 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
737 return pVCpu->cpum.s.Guest.dr[3];
738}
739
740
741VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
742{
743 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
744 return pVCpu->cpum.s.Guest.dr[6];
745}
746
747
748VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
749{
750 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
751 return pVCpu->cpum.s.Guest.dr[7];
752}
753
754
755VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
756{
757 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
758 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
759 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
760 if (iReg == 4 || iReg == 5)
761 iReg += 2;
762 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
763 return VINF_SUCCESS;
764}
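
/*
 * Illustrative sketch; exampleReadAliasedDr is hypothetical. Because of the
 * DR4/DR5 aliasing above, asking for DR4 yields the DR6 value and asking for
 * DR5 yields DR7.
 */
static void exampleReadAliasedDr(PVMCPU pVCpu)
{
    uint64_t uDr4 = 0;
    int rc = CPUMGetGuestDRx(pVCpu, 4 /* alias of DR6 */, &uDr4);
    Assert(RT_SUCCESS(rc) && uDr4 == CPUMGetGuestDR6(pVCpu));
    RT_NOREF(rc, uDr4);
}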
765
766
767VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
768{
769 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
770 return pVCpu->cpum.s.Guest.msrEFER;
771}
772
773
774/**
775 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
776 *
777 * @returns Pointer to the leaf if found, NULL if not.
778 *
779 * @param pVM The cross context VM structure.
780 * @param uLeaf The leaf to get.
781 */
782PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
783{
784 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
785 if (iEnd)
786 {
787 unsigned iStart = 0;
788 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
789 for (;;)
790 {
791 unsigned i = iStart + (iEnd - iStart) / 2U;
792 if (uLeaf < paLeaves[i].uLeaf)
793 {
794 if (i <= iStart)
795 return NULL;
796 iEnd = i;
797 }
798 else if (uLeaf > paLeaves[i].uLeaf)
799 {
800 i += 1;
801 if (i >= iEnd)
802 return NULL;
803 iStart = i;
804 }
805 else
806 {
807 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
808 return &paLeaves[i];
809
810 /* This shouldn't normally happen. But in case it does due
811 to user configuration overrides or something, just return the
812 first sub-leaf. */
813 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
814 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
815 while ( paLeaves[i].uSubLeaf != 0
816 && i > 0
817 && uLeaf == paLeaves[i - 1].uLeaf)
818 i--;
819 return &paLeaves[i];
820 }
821 }
822 }
823
824 return NULL;
825}
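
/*
 * Illustrative sketch; exampleHasCpuIdLeaf is hypothetical. The leaf array is
 * kept sorted by uLeaf, which is what makes the binary search above valid; a
 * miss simply yields NULL.
 */
static bool exampleHasCpuIdLeaf(PVM pVM, uint32_t uLeaf)
{
    return cpumCpuIdGetLeaf(pVM, uLeaf) != NULL;
}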
826
827
828/**
829 * Looks up a CPUID leaf in the CPUID leaf array.
830 *
831 * @returns Pointer to the leaf if found, NULL if not.
832 *
833 * @param pVM The cross context VM structure.
834 * @param uLeaf The leaf to get.
835 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
836 * isn't.
837 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
838 */
839PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
840{
841 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
842 if (iEnd)
843 {
844 unsigned iStart = 0;
845 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
846 for (;;)
847 {
848 unsigned i = iStart + (iEnd - iStart) / 2U;
849 if (uLeaf < paLeaves[i].uLeaf)
850 {
851 if (i <= iStart)
852 return NULL;
853 iEnd = i;
854 }
855 else if (uLeaf > paLeaves[i].uLeaf)
856 {
857 i += 1;
858 if (i >= iEnd)
859 return NULL;
860 iStart = i;
861 }
862 else
863 {
864 uSubLeaf &= paLeaves[i].fSubLeafMask;
865 if (uSubLeaf == paLeaves[i].uSubLeaf)
866 *pfExactSubLeafHit = true;
867 else
868 {
869 /* Find the right subleaf. We return the last one before
870 uSubLeaf if we don't find an exact match. */
871 if (uSubLeaf < paLeaves[i].uSubLeaf)
872 while ( i > 0
873 && uLeaf == paLeaves[i - 1].uLeaf
874 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
875 i--;
876 else
877 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
878 && uLeaf == paLeaves[i + 1].uLeaf
879 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
880 i++;
881 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
882 }
883 return &paLeaves[i];
884 }
885 }
886 }
887
888 *pfExactSubLeafHit = false;
889 return NULL;
890}
891
892
893/**
894 * Gets a CPUID leaf.
895 *
896 * @param pVCpu The cross context virtual CPU structure.
897 * @param uLeaf The CPUID leaf to get.
898 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
899 * @param pEax Where to store the EAX value.
900 * @param pEbx Where to store the EBX value.
901 * @param pEcx Where to store the ECX value.
902 * @param pEdx Where to store the EDX value.
903 */
904VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
905 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
906{
907 bool fExactSubLeafHit;
908 PVM pVM = pVCpu->CTX_SUFF(pVM);
909 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
910 if (pLeaf)
911 {
912 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
913 if (fExactSubLeafHit)
914 {
915 *pEax = pLeaf->uEax;
916 *pEbx = pLeaf->uEbx;
917 *pEcx = pLeaf->uEcx;
918 *pEdx = pLeaf->uEdx;
919
920 /*
921 * Deal with CPU specific information.
922 */
923 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
924 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
925 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
926 {
927 if (uLeaf == 1)
928 {
929 /* EBX: Bits 31-24: Initial APIC ID. */
930 Assert(pVCpu->idCpu <= 255);
931 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
932 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
933
934 /* EDX: Bit 9: AND with APICBASE.EN. */
935 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
936 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
937
938 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
939 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
940 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
941 }
942 else if (uLeaf == 0xb)
943 {
944 /* EDX: Initial extended APIC ID. */
945 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
946 *pEdx = pVCpu->idCpu;
947 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
948 }
949 else if (uLeaf == UINT32_C(0x8000001e))
950 {
951 /* EAX: Initial extended APIC ID. */
952 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
953 *pEax = pVCpu->idCpu;
954 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
955 }
956 else if (uLeaf == UINT32_C(0x80000001))
957 {
958 /* EDX: Bit 9: AND with APICBASE.EN. */
959 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
960 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
961 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
962 }
963 else
964 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
965 }
966 }
967 /*
968 * Out-of-range sub-leaves aren't quite as easy and pretty to emulate
969 * as exact hits, but we do the best we can here...
970 */
971 else
972 {
973 *pEax = *pEbx = *pEcx = *pEdx = 0;
974 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
975 {
976 *pEcx = uSubLeaf & 0xff;
977 *pEdx = pVCpu->idCpu;
978 }
979 }
980 }
981 else
982 {
983 /*
984 * Different CPUs have different ways of dealing with unknown CPUID leaves.
985 */
986 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
987 {
988 default:
989 AssertFailed();
990 RT_FALL_THRU();
991 case CPUMUNKNOWNCPUID_DEFAULTS:
992 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
993 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
994 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
995 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
996 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
997 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
998 break;
999 case CPUMUNKNOWNCPUID_PASSTHRU:
1000 *pEax = uLeaf;
1001 *pEbx = 0;
1002 *pEcx = uSubLeaf;
1003 *pEdx = 0;
1004 break;
1005 }
1006 }
1007 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1008}
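
/*
 * Illustrative sketch; exampleGetInitialApicId is hypothetical. For std leaf 1
 * the code above patches EBX bits 31-24 with the virtual CPU id and adjusts
 * the APIC and OSXSAVE feature bits to the current APIC/CR4 state.
 */
static uint8_t exampleGetInitialApicId(PVMCPUCC pVCpu)
{
    uint32_t uEax, uEbx, uEcx, uEdx;
    CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
    return (uint8_t)(uEbx >> 24);
}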
1009
1010
1011/**
1012 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1013 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1014 *
1015 * @returns Previous value.
1016 * @param pVCpu The cross context virtual CPU structure to make the
1017 * change on. Usually the calling EMT.
1018 * @param fVisible Whether to make it visible (true) or hide it (false).
1019 *
1020 * @remarks This is "VMMDECL" so that it still links with
1021 * the old APIC code which is in VBoxDD2 and not in
1022 * the VMM module.
1023 */
1024VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1025{
1026 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1027 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1028 return fOld;
1029}
1030
1031
1032/**
1033 * Gets the host CPU vendor.
1034 *
1035 * @returns CPU vendor.
1036 * @param pVM The cross context VM structure.
1037 */
1038VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1039{
1040 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1041}
1042
1043
1044/**
1045 * Gets the host CPU microarchitecture.
1046 *
1047 * @returns CPU microarchitecture.
1048 * @param pVM The cross context VM structure.
1049 */
1050VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
1051{
1052 return pVM->cpum.s.HostFeatures.enmMicroarch;
1053}
1054
1055
1056/**
1057 * Gets the guest CPU vendor.
1058 *
1059 * @returns CPU vendor.
1060 * @param pVM The cross context VM structure.
1061 */
1062VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1063{
1064 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1065}
1066
1067
1068/**
1069 * Gets the guest CPU microarchitecture.
1070 *
1071 * @returns CPU microarchitecture.
1072 * @param pVM The cross context VM structure.
1073 */
1074VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM)
1075{
1076 return pVM->cpum.s.GuestFeatures.enmMicroarch;
1077}
1078
1079
1080VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0)
1081{
1082 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1083 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1084}
1085
1086
1087VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1)
1088{
1089 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1090 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1091}
1092
1093
1094VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2)
1095{
1096 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1097 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1098}
1099
1100
1101VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3)
1102{
1103 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1104 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1105}
1106
1107
1108VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1109{
1110 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1111 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
1112 return VINF_SUCCESS; /* No need to recalc. */
1113}
1114
1115
1116VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7)
1117{
1118 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1119 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
1120 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1121}
1122
1123
1124VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value)
1125{
1126 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1127 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1128 if (iReg == 4 || iReg == 5)
1129 iReg += 2;
1130 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1131 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1132}
1133
1134
1135/**
1136 * Recalculates the hypervisor DRx register values based on current guest
1137 * registers and DBGF breakpoints, updating changed registers depending on the
1138 * context.
1139 *
1140 * This is called whenever a guest DRx register is modified (any context) and
1141 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1142 *
1143 * In raw-mode context this function will reload any (hyper) DRx registers that
1144 * come out with a different value. It may also have to save the host debug
1145 * registers if that hasn't been done already. In this context though, we'll
1146 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1147 * are only important when breakpoints are actually enabled.
1148 *
1149 * In ring-0 (HM) context DR0-3 will be reloaded by us, while DR7 will be
1150 * reloaded by the HM code if it changes. Furthermore, we will only use the
1151 * combined register set when the VBox debugger is actually using hardware BPs;
1152 * when it isn't, we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1153 * concern us here).
1154 *
1155 * In ring-3 we won't be loading anything, so we'll calculate the hypervisor
1156 * values all the time.
1157 *
1158 * @returns VINF_SUCCESS.
1159 * @param pVCpu The cross context virtual CPU structure.
1160 * @param iGstReg The guest debug register number that was modified.
1161 * UINT8_MAX if not a guest register.
1162 * @param fForceHyper Used in HM to force hyper registers because of single
1163 * stepping.
1164 */
1165VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg, bool fForceHyper)
1166{
1167 PVM pVM = pVCpu->CTX_SUFF(pVM);
1168#ifndef IN_RING0
1169 RT_NOREF_PV(iGstReg);
1170#endif
1171
1172 /*
1173 * Compare the DR7s first.
1174 *
1175 * We only care about the enabled flags. GD is virtualized when we
1176 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1177 * always have the LE and GE bits set, so there is no need to check and
1178 * disable stuff when they're cleared, like we have to for the guest DR7.
1179 */
1180 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1181 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1182 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1183 uGstDr7 = 0;
1184 else if (!(uGstDr7 & X86_DR7_LE))
1185 uGstDr7 &= ~X86_DR7_LE_ALL;
1186 else if (!(uGstDr7 & X86_DR7_GE))
1187 uGstDr7 &= ~X86_DR7_GE_ALL;
1188
1189 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1190
1191 /** @todo r=bird: I'm totally confused by fForceHyper! */
1192#ifdef IN_RING0
1193 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1194 fForceHyper = true;
1195#endif
1196 if ((!fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
1197 {
1198 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1199
1200 /*
1201 * Ok, something is enabled. Recalc each of the breakpoints, taking
1202 * the VM debugger ones over the guest ones. In raw-mode context we will
1203 * not allow breakpoints with values inside the hypervisor area.
1204 */
1205 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1206
1207 /* bp 0 */
1208 RTGCUINTREG uNewDr0;
1209 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1210 {
1211 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1212 uNewDr0 = DBGFBpGetDR0(pVM);
1213 }
1214 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1215 {
1216 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1217 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1218 }
1219 else
1220 uNewDr0 = 0;
1221
1222 /* bp 1 */
1223 RTGCUINTREG uNewDr1;
1224 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1225 {
1226 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1227 uNewDr1 = DBGFBpGetDR1(pVM);
1228 }
1229 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1230 {
1231 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1232 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1233 }
1234 else
1235 uNewDr1 = 0;
1236
1237 /* bp 2 */
1238 RTGCUINTREG uNewDr2;
1239 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1240 {
1241 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1242 uNewDr2 = DBGFBpGetDR2(pVM);
1243 }
1244 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1245 {
1246 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1247 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1248 }
1249 else
1250 uNewDr2 = 0;
1251
1252 /* bp 3 */
1253 RTGCUINTREG uNewDr3;
1254 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1255 {
1256 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1257 uNewDr3 = DBGFBpGetDR3(pVM);
1258 }
1259 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1260 {
1261 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1262 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1263 }
1264 else
1265 uNewDr3 = 0;
1266
1267 /*
1268 * Apply the updates.
1269 */
1270 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1271 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1272 CPUMSetHyperDR3(pVCpu, uNewDr3);
1273 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1274 CPUMSetHyperDR2(pVCpu, uNewDr2);
1275 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1276 CPUMSetHyperDR1(pVCpu, uNewDr1);
1277 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1278 CPUMSetHyperDR0(pVCpu, uNewDr0);
1279 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1280 CPUMSetHyperDR7(pVCpu, uNewDr7);
1281 }
1282#ifdef IN_RING0
1283 else if (CPUMIsGuestDebugStateActive(pVCpu))
1284 {
1285 /*
1286 * Reload the register that was modified. Normally this won't happen
1287 * as we won't intercept DRx writes when not having the hyper debug
1288 * state loaded, but in case we do for some reason we'll simply deal
1289 * with it.
1290 */
1291 switch (iGstReg)
1292 {
1293 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1294 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1295 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1296 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1297 default:
1298 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1299 }
1300 }
1301#endif
1302 else
1303 {
1304 /*
1305 * No active debug state any more. In raw-mode this means we have to
1306 * make sure DR7 has everything disabled now, if we armed it already.
1307 * In ring-0 we might end up here when just single stepping.
1308 */
1309#ifdef IN_RING0
1310 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1311 {
1312 if (pVCpu->cpum.s.Hyper.dr[0])
1313 ASMSetDR0(0);
1314 if (pVCpu->cpum.s.Hyper.dr[1])
1315 ASMSetDR1(0);
1316 if (pVCpu->cpum.s.Hyper.dr[2])
1317 ASMSetDR2(0);
1318 if (pVCpu->cpum.s.Hyper.dr[3])
1319 ASMSetDR3(0);
1320 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1321 }
1322#endif
1323 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1324
1325 /* Clear all the registers. */
1326 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1327 pVCpu->cpum.s.Hyper.dr[3] = 0;
1328 pVCpu->cpum.s.Hyper.dr[2] = 0;
1329 pVCpu->cpum.s.Hyper.dr[1] = 0;
1330 pVCpu->cpum.s.Hyper.dr[0] = 0;
1331
1332 }
1333 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1334 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1335 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1336 pVCpu->cpum.s.Hyper.dr[7]));
1337
1338 return VINF_SUCCESS;
1339}
1340
1341
1342/**
1343 * Set the guest XCR0 register.
1344 *
1345 * Will load additional state if the FPU state is already loaded (in ring-0 &
1346 * raw-mode context).
1347 *
1348 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1349 * value.
1350 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1351 * @param uNewValue The new value.
1352 * @thread EMT(pVCpu)
1353 */
1354VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue)
1355{
1356 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
1357 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1358 /* The X87 bit cannot be cleared. */
1359 && (uNewValue & XSAVE_C_X87)
1360 /* AVX requires SSE. */
1361 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1362 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1363 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1364 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1365 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1366 )
1367 {
1368 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1369
1370 /* If more state components are enabled, we need to take care to load
1371 them if the FPU/SSE state is already loaded. May otherwise leak
1372 host state to the guest. */
1373 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1374 if (fNewComponents)
1375 {
1376#ifdef IN_RING0
1377 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1378 {
1379 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1380 /* Adding more components. */
1381 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
1382 else
1383 {
1384 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1385 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1386 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1387 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1388 }
1389 }
1390#endif
1391 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1392 }
1393 return VINF_SUCCESS;
1394 }
1395 return VERR_CPUM_RAISE_GP_0;
1396}
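
/*
 * Illustrative sketch; exampleEnableGuestAvxState is hypothetical and assumes
 * the guest XSAVE mask (fXStateGuestMask) permits YMM. X87 must always stay
 * set and YMM requires SSE, so enabling AVX means writing X87|SSE|YMM; a bare
 * XSAVE_C_YMM would be rejected with VERR_CPUM_RAISE_GP_0 by the checks above.
 */
static int exampleEnableGuestAvxState(PVMCPUCC pVCpu)
{
    return CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM);
}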
1397
1398
1399/**
1400 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1401 *
1402 * @returns true if NXE is enabled, otherwise false.
1403 * @param pVCpu The cross context virtual CPU structure.
1404 */
1405VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
1406{
1407 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1408 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1409}
1410
1411
1412/**
1413 * Tests if the guest has the Page Size Extension enabled (PSE).
1414 *
1415 * @returns true if PSE (or PAE/long mode) is enabled, otherwise false.
1416 * @param pVCpu The cross context virtual CPU structure.
1417 */
1418VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
1419{
1420 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1421 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1422 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1423}
1424
1425
1426/**
1427 * Tests if the guest has paging enabled (PG).
1428 *
1429 * @returns true if paging is enabled, otherwise false.
1430 * @param pVCpu The cross context virtual CPU structure.
1431 */
1432VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
1433{
1434 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1435 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1436}
1437
1438
1439/**
1440 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
1441 *
1442 * @returns true if write protection is enabled, otherwise false.
1443 * @param pVCpu The cross context virtual CPU structure.
1444 */
1445VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
1446{
1447 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1448 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1449}
1450
1451
1452/**
1453 * Tests if the guest is running in real mode or not.
1454 *
1455 * @returns true if in real mode, otherwise false.
1456 * @param pVCpu The cross context virtual CPU structure.
1457 */
1458VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
1459{
1460 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1461 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1462}
1463
1464
1465/**
1466 * Tests if the guest is running in real or virtual 8086 mode.
1467 *
1468 * @returns @c true if it is, @c false if not.
1469 * @param pVCpu The cross context virtual CPU structure.
1470 */
1471VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
1472{
1473 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
1474 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1475 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1476}
1477
1478
1479/**
1480 * Tests if the guest is running in protected mode or not.
1481 *
1482 * @returns true if in protected mode, otherwise false.
1483 * @param pVCpu The cross context virtual CPU structure.
1484 */
1485VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
1486{
1487 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1488 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1489}
1490
1491
1492/**
1493 * Tests if the guest is running in paged protected or not.
1494 *
1495 * @returns true if in paged protected mode, otherwise false.
1496 * @param pVCpu The cross context virtual CPU structure.
1497 */
1498VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
1499{
1500 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1501 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1502}
1503
1504
1505/**
1506 * Tests if the guest is running in long mode or not.
1507 *
1508 * @returns true if in long mode, otherwise false.
1509 * @param pVCpu The cross context virtual CPU structure.
1510 */
1511VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
1512{
1513 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1514 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1515}
1516
1517
1518/**
1519 * Tests if the guest is running in PAE mode or not.
1520 *
1521 * @returns true if in PAE mode, otherwise false.
1522 * @param pVCpu The cross context virtual CPU structure.
1523 */
1524VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
1525{
1526 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1527 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1528 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1529 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1530 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1531 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1532}
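
/*
 * Illustrative sketch; exampleUsesPaeStyleEntries is hypothetical. PAE mode as
 * reported above means 32-bit PAE paging only (CR4.PAE + CR0.PG with EFER.LMA
 * clear); long mode also uses the 64-bit PAE-format page-table entries but is
 * reported via CPUMIsGuestInLongMode() instead.
 */
static bool exampleUsesPaeStyleEntries(PVMCPU pVCpu)
{
    return CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu);
}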
1533
1534
1535/**
1536 * Tests if the guest is running in 64-bit mode or not.
1537 *
1538 * @returns true if in 64-bit protected mode, otherwise false.
1539 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1540 */
1541VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1542{
1543 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
1544 if (!CPUMIsGuestInLongMode(pVCpu))
1545 return false;
1546 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1547 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1548}
1549
1550
1551/**
1552 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
1553 * registers.
1554 *
1555 * @returns true if in 64-bit protected mode, otherwise false.
1556 * @param pCtx Pointer to the current guest CPU context.
1557 */
1558VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
1559{
1560 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
1561}
1562
1563
1564/**
1565 * Sets the specified changed flags (CPUM_CHANGED_*).
1566 *
1567 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1568 * @param fChangedAdd The changed flags to add.
1569 */
1570VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
1571{
1572 pVCpu->cpum.s.fChanged |= fChangedAdd;
1573}
1574
1575
1576/**
1577 * Checks if the CPU supports the XSAVE and XRSTOR instruction.
1578 *
1579 * @returns true if supported.
1580 * @returns false if not supported.
1581 * @param pVM The cross context VM structure.
1582 */
1583VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
1584{
1585 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
1586}
1587
1588
1589/**
1590 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1591 * @returns true if used.
1592 * @returns false if not used.
1593 * @param pVM The cross context VM structure.
1594 */
1595VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1596{
1597 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
1598}
1599
1600
1601/**
1602 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1603 * @returns true if used.
1604 * @returns false if not used.
1605 * @param pVM The cross context VM structure.
1606 */
1607VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1608{
1609 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
1610}
1611
1612
1613/**
1614 * Checks if we activated the FPU/XMM state of the guest OS.
1615 *
1616 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
1617 * time we'll be executing guest code, so it may return true for 64-on-32 when
1618 * we still haven't actually loaded the FPU state, just scheduled it to be
1619 * loaded the next time we go through the world switcher (CPUM_SYNC_FPU_STATE).
1620 *
1621 * @returns true / false.
1622 * @param pVCpu The cross context virtual CPU structure.
1623 */
1624VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
1625{
1626 return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
1627}
1628
1629
1630/**
1631 * Checks if we've really loaded the FPU/XMM state of the guest OS.
1632 *
1633 * @returns true / false.
1634 * @param pVCpu The cross context virtual CPU structure.
1635 */
1636VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
1637{
1638 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1639}
1640
1641
1642/**
1643 * Checks if we saved the FPU/XMM state of the host OS.
1644 *
1645 * @returns true / false.
1646 * @param pVCpu The cross context virtual CPU structure.
1647 */
1648VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
1649{
1650 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
1651}
1652
1653
1654/**
1655 * Checks if the guest debug state is active.
1656 *
1657 * @returns boolean
1658 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1659 */
1660VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
1661{
1662 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
1663}
1664
1665
1666/**
1667 * Checks if the guest debug state is to be made active during the world-switch
1668 * (currently only used for the 32->64 switcher case).
1669 *
1670 * @returns boolean
1671 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1672 */
1673VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
1674{
1675 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
1676}
1677
1678
1679/**
1680 * Checks if the hyper debug state is active.
1681 *
1682 * @returns boolean
1683 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1684 */
1685VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
1686{
1687 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
1688}
1689
1690
1691/**
1692 * Checks if the hyper debug state is to be made active during the world-switch
1693 * (currently only used for the 32->64 switcher case).
1694 *
1695 * @returns boolean
1696 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1697 */
1698VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
1699{
1700 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
1701}
1702
1703
1704/**
1705 * Mark the guest's debug state as inactive.
1706 *
1707 * @returns boolean
1708 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1709 * @todo This API doesn't make sense any more.
1710 */
1711VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
1712{
1713 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
1714 NOREF(pVCpu);
1715}
1716
1717
1718/**
1719 * Get the current privilege level of the guest.
1720 *
1721 * @returns CPL
1722 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1723 */
1724VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
1725{
1726 /*
1727 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
1728 *
1729 * Note! We used to check CS.DPL here, assuming it was always equal to
1730 * CPL even if a conforming segment was loaded. But this turned out to
1731 * only apply to older AMD-V. With VT-x we had an ACP2 regression
1732 * during install after a far call to ring 2 with VT-x. Then on newer
1733 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
1734 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
1735 *
1736 * So, forget CS.DPL, always use SS.DPL.
1737 *
1738 * Note! The SS RPL is always equal to the CPL, while the CS RPL
1739 * isn't necessarily equal if the segment is conforming.
1740 * See section 4.11.1 in the AMD manual.
1741 *
1742 * Update: Where the heck does it say CS.RPL can differ from CPL other than
1743 * right after real->prot mode switch and when in V8086 mode? That
1744 * section says the RPL specified in a direct transfer (call, jmp,
1745 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
1746 * it would be impossible for an exception handler or the iret
1747 * instruction to figure out whether SS:ESP are part of the frame
1748 * or not. A VBox or qemu bug must've led to this misconception.
1749 *
1750 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
1751 * selector into SS with an RPL other than the CPL when CPL != 3 and
1752 * we're in 64-bit mode. The Intel dev box doesn't allow this; only
1753 * RPL = CPL works. Weird.
1754 */
1755 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
1756 uint32_t uCpl;
1757 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1758 {
1759 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1760 {
1761 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
1762 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
1763 else
1764 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
1765 }
1766 else
1767 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
1768 }
1769 else
1770 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
1771 return uCpl;
1772}
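
/*
 * Illustrative sketch; exampleIsGuestInKernelMode is hypothetical. It builds
 * on the CPL logic above: SS.DPL in protected mode, 3 in V8086 mode and 0 in
 * real mode.
 */
static bool exampleIsGuestInKernelMode(PVMCPU pVCpu)
{
    return CPUMGetGuestCPL(pVCpu) == 0;
}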
1773
1774
1775/**
1776 * Gets the current guest CPU mode.
1777 *
1778 * If paging mode is what you need, check out PGMGetGuestMode().
1779 *
1780 * @returns The CPU mode.
1781 * @param pVCpu The cross context virtual CPU structure.
1782 */
1783VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
1784{
1785 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1786 CPUMMODE enmMode;
1787 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1788 enmMode = CPUMMODE_REAL;
1789 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1790 enmMode = CPUMMODE_PROTECTED;
1791 else
1792 enmMode = CPUMMODE_LONG;
1793
1794 return enmMode;
1795}
1796
1797
1798/**
1799 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
1800 *
1801 * @returns 16, 32 or 64.
1802 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1803 */
1804VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
1805{
1806 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1807
1808 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1809 return 16;
1810
1811 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1812 {
1813 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1814 return 16;
1815 }
1816
1817 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1818 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1819 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1820 return 64;
1821
1822 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1823 return 32;
1824
1825 return 16;
1826}
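
/*
 * Illustrative sketch; exampleDefaultOperandBits is hypothetical. The bitness
 * above is derived from CS.L + EFER.LMA (64), CS.D (32) or neither (16); note
 * that 64-bit code still defaults to a 32-bit operand size.
 */
static uint32_t exampleDefaultOperandBits(PVMCPU pVCpu)
{
    uint32_t const cBits = CPUMGetGuestCodeBits(pVCpu);
    return cBits == 64 ? 32 : cBits;
}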
1827
1828
1829VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
1830{
1831 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1832
1833 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1834 return DISCPUMODE_16BIT;
1835
1836 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1837 {
1838 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1839 return DISCPUMODE_16BIT;
1840 }
1841
1842 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1843 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1844 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1845 return DISCPUMODE_64BIT;
1846
1847 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1848 return DISCPUMODE_32BIT;
1849
1850 return DISCPUMODE_16BIT;
1851}
1852
1853
1854/**
1855 * Gets the guest MXCSR_MASK value.
1856 *
1857 * This does not access the x87 state, but the value we determined at VM
1858 * initialization.
1859 *
1860 * @returns MXCSR mask.
1861 * @param pVM The cross context VM structure.
1862 */
1863VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
1864{
1865 return pVM->cpum.s.GuestInfo.fMxCsrMask;
1866}
1867
1868
1869/**
1870 * Returns whether the guest has physical interrupts enabled.
1871 *
1872 * @returns @c true if interrupts are enabled, @c false otherwise.
1873 * @param pVCpu The cross context virtual CPU structure.
1874 *
1875 * @remarks Warning! This function does -not- take into account the global-interrupt
1876 * flag (GIF).
1877 */
1878VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
1879{
1880 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
1881 {
1882 uint32_t const fEFlags = pVCpu->cpum.s.Guest.eflags.u;
1883 return RT_BOOL(fEFlags & X86_EFL_IF);
1884 }
1885
1886 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
1887 return CPUMIsGuestVmxPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
1888
1889 Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
1890 return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
1891}
1892
1893
1894/**
1895 * Returns whether the nested-guest has virtual interrupts enabled.
1896 *
1897 * @returns @c true if interrupts are enabled, @c false otherwise.
1898 * @param pVCpu The cross context virtual CPU structure.
1899 *
1900 * @remarks Warning! This function does -not- take into account the global-interrupt
1901 * flag (GIF).
1902 */
1903VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
1904{
1905 Assert(CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest));
1906
1907 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
1908 return CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
1909
1910 Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
1911 return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
1912}
1913
1914
1915/**
1916 * Calculates the interruptibility of the guest.
1917 *
1918 * @returns Interruptibility level.
1919 * @param pVCpu The cross context virtual CPU structure.
1920 */
1921VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
1922{
1923#if 1
1924 /* Global-interrupt flag blocks pretty much everything we care about here. */
1925 if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
1926 {
1927 /*
1928 * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
1929 * it directly here. If and how EFLAGS are used depends on the context (nested-guest
1930 * or raw-mode). Hence we use the function below which handles the details.
1931 */
1932 if ( CPUMIsGuestPhysIntrEnabled(pVCpu)
1933 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
1934 {
1935 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
1936 || CPUMIsGuestVirtIntrEnabled(pVCpu))
1937 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1938
1939 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
1940 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
1941 }
1942
1943 /*
1944 * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
1945 * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
1946 * However, there is some uncertainty regarding the converse, i.e. whether
1947 * NMI-blocking until IRET blocks delivery of physical interrupts.
1948 *
1949 * See Intel spec. 25.4.1 "Event Blocking".
1950 */
1951 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1952 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1953
1954 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1955 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
1956
1957 return CPUMINTERRUPTIBILITY_INT_DISABLED;
1958 }
1959 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1960#else
1961 if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
1962 {
1963 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
1964 {
1965 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
1966 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1967
1968 /** @todo does blocking NMIs mean interrupts are also inhibited? */
1969 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1970 {
1971 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1972 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
1973 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1974 }
1975 AssertFailed();
1976 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1977 }
1978 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1979 }
1980 else
1981 {
1982 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
1983 {
1984 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1985 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1986 return CPUMINTERRUPTIBILITY_INT_DISABLED;
1987 }
1988 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1989 }
1990#endif
1991}
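
/*
 * A minimal usage sketch for the interruptibility level computed above. The
 * helper below is hypothetical (not part of the CPUM API); it assumes the
 * caller only cares about delivering a physical (external) interrupt, which
 * the UNRESTRAINED and VIRT_INT_DISABLED levels both permit.
 */
static bool cpumExampleCanDeliverExtInt(PVMCPU pVCpu)
{
    CPUMINTERRUPTIBILITY const enmLevel = CPUMGetGuestInterruptibility(pVCpu);
    return enmLevel == CPUMINTERRUPTIBILITY_UNRESTRAINED
        || enmLevel == CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
}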
1992
1993
1994/**
1995 * Gets whether the guest (or nested-guest) is currently blocking delivery of NMIs.
1996 *
1997 * @returns @c true if NMIs are blocked, @c false otherwise.
1998 * @param pVCpu The cross context virtual CPU structure.
1999 */
2000VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu)
2001{
2002 /*
2003 * Return the state of guest-NMI blocking in any of the following cases:
2004 * - We're not executing a nested-guest.
2005 * - We're executing an SVM nested-guest[1].
2006 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2007 *
2008 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2009 * SVM hypervisors must track NMI blocking themselves by intercepting
2010 * the IRET instruction after injection of an NMI.
2011 */
2012 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2013 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2014 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2015 || !CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI))
2016 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2017
2018 /*
2019 * Return the state of virtual-NMI blocking, if we are executing a
2020 * VMX nested-guest with virtual-NMIs enabled.
2021 */
2022 return CPUMIsGuestVmxVirtNmiBlocking(pVCpu, pCtx);
2023}
2024
2025
2026/**
2027 * Sets blocking delivery of NMIs to the guest.
2028 *
2029 * @param pVCpu The cross context virtual CPU structure.
2030 * @param fBlock Whether NMIs are blocked or not.
2031 */
2032VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock)
2033{
2034 /*
2035 * Set the state of guest-NMI blocking in any of the following cases:
2036 * - We're not executing a nested-guest.
2037 * - We're executing an SVM nested-guest[1].
2038 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2039 *
2040 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2041 * SVM hypervisors must track NMI blocking themselves by intercepting
2042 * the IRET instruction after injection of an NMI.
2043 */
2044 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2045 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2046 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2047 || !CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI))
2048 {
2049 if (fBlock)
2050 {
2051 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2052 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2053 }
2054 else
2055 {
2056 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2057 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2058 }
2059 return;
2060 }
2061
2062 /*
2063 * Set the state of virtual-NMI blocking, if we are executing a
2064 * VMX nested-guest with virtual-NMIs enabled.
2065 */
2066 return CPUMSetGuestVmxVirtNmiBlocking(pVCpu, pCtx, fBlock);
2067}
2068
2069
2070/**
2071 * Checks whether the SVM nested-guest has physical interrupts enabled.
2072 *
2073 * @returns true if interrupts are enabled, false otherwise.
2074 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2075 * @param pCtx The guest-CPU context.
2076 *
2077 * @remarks This does -not- take into account the global-interrupt flag.
2078 */
2079VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2080{
2081 /** @todo Optimization: Avoid this function call and use a pointer to the
2082 * relevant eflags instead (set up during VMRUN instruction emulation). */
2083 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2084
2085 X86EFLAGS fEFlags;
2086 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2087 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2088 else
2089 fEFlags.u = pCtx->eflags.u;
2090
2091 return fEFlags.Bits.u1IF;
2092}
2093
2094
2095/**
2096 * Checks whether the SVM nested-guest is in a state to receive virtual interrupts
2097 * (i.e. interrupts set up for injection by the VMRUN instruction).
2098 *
2099 * @returns @c true if the nested-guest is ready to receive virtual interrupts,
2100 * @c false otherwise.
2101 *
2102 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2103 * @param pCtx The guest-CPU context.
2104 */
2105VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2106{
2107 RT_NOREF(pVCpu);
2108 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2109
2110 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2111 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2112 Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing virtual-GIF feature to the guest yet. */
2113 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2114 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2115 return false;
2116
2117 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2118}
2119
2120
2121/**
2122 * Gets the pending SVM nested-guest interrupt vector.
2123 *
2124 * @returns The nested-guest interrupt to inject.
2125 * @param pCtx The guest-CPU context.
2126 */
2127VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
2128{
2129 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2130 return pVmcbCtrl->IntCtrl.n.u8VIntrVector;
2131}
2132
2133
2134/**
2135 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2136 *
2137 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2138 * @param pCtx The guest-CPU context.
2139 */
2140VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx)
2141{
2142 /*
2143 * Reload the guest's "host state".
2144 */
2145 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2146 pCtx->es = pHostState->es;
2147 pCtx->cs = pHostState->cs;
2148 pCtx->ss = pHostState->ss;
2149 pCtx->ds = pHostState->ds;
2150 pCtx->gdtr = pHostState->gdtr;
2151 pCtx->idtr = pHostState->idtr;
2152 CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2153 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2154 pCtx->cr3 = pHostState->uCr3;
2155 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2156 pCtx->rflags = pHostState->rflags;
2157 pCtx->rflags.Bits.u1VM = 0;
2158 pCtx->rip = pHostState->uRip;
2159 pCtx->rsp = pHostState->uRsp;
2160 pCtx->rax = pHostState->uRax;
2161 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2162 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2163 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2164
2165 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2166 * raise \#GP(0) in the guest. */
2167
2168 /** @todo check the loaded host-state for consistency. Figure out what
2169 * exactly this involves? */
2170}
2171
2172
2173/**
2174 * Saves the host-state to the host-state save area as part of a VMRUN.
2175 *
2176 * @param pCtx The guest-CPU context.
2177 * @param cbInstr The length of the VMRUN instruction in bytes.
2178 */
2179VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2180{
2181 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2182 pHostState->es = pCtx->es;
2183 pHostState->cs = pCtx->cs;
2184 pHostState->ss = pCtx->ss;
2185 pHostState->ds = pCtx->ds;
2186 pHostState->gdtr = pCtx->gdtr;
2187 pHostState->idtr = pCtx->idtr;
2188 pHostState->uEferMsr = pCtx->msrEFER;
2189 pHostState->uCr0 = pCtx->cr0;
2190 pHostState->uCr3 = pCtx->cr3;
2191 pHostState->uCr4 = pCtx->cr4;
2192 pHostState->rflags = pCtx->rflags;
2193 pHostState->uRip = pCtx->rip + cbInstr;
2194 pHostState->uRsp = pCtx->rsp;
2195 pHostState->uRax = pCtx->rax;
2196}
2197
2198
2199/**
2200 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
2201 * nested-guest.
2202 *
2203 * @returns The TSC value after applying any nested-guest TSC offset.
2204 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2205 * @param uTscValue The guest TSC.
2206 *
2207 * @sa CPUMRemoveNestedGuestTscOffset.
2208 */
2209VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2210{
2211 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2212 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2213 {
2214 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2215 Assert(pVmcs);
2216 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2217 return uTscValue + pVmcs->u64TscOffset.u;
2218 return uTscValue;
2219 }
2220
2221 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2222 {
2223 uint64_t offTsc;
2224 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2225 {
2226 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2227 Assert(pVmcb);
2228 offTsc = pVmcb->ctrl.u64TSCOffset;
2229 }
2230 return uTscValue + offTsc;
2231 }
2232 return uTscValue;
2233}
2234
2235
2236/**
2237 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
2238 * guest.
2239 *
2240 * @returns The TSC value after removing any nested-guest TSC offset.
2241 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2242 * @param uTscValue The nested-guest TSC.
2243 *
2244 * @sa CPUMApplyNestedGuestTscOffset.
2245 */
2246VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2247{
2248 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2249 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2250 {
2251 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2252 {
2253 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2254 Assert(pVmcs);
2255 return uTscValue - pVmcs->u64TscOffset.u;
2256 }
2257 return uTscValue;
2258 }
2259
2260 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2261 {
2262 uint64_t offTsc;
2263 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2264 {
2265 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2266 Assert(pVmcb);
2267 offTsc = pVmcb->ctrl.u64TSCOffset;
2268 }
2269 return uTscValue - offTsc;
2270 }
2271 return uTscValue;
2272}
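
/*
 * An informal sketch of the relationship between the two helpers above. The
 * function below is hypothetical (not part of the CPUM API) and assumes the
 * nested-guest TSC offset does not change between the two calls, in which
 * case removing the offset undoes applying it.
 */
static void cpumExampleTscOffsetRoundTrip(PVMCPU pVCpu, uint64_t uGuestTsc)
{
    uint64_t const uNstGstTsc = CPUMApplyNestedGuestTscOffset(pVCpu, uGuestTsc);
    Assert(CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc) == uGuestTsc);
    RT_NOREF(uNstGstTsc);
}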
2273
2274
2275/**
2276 * Used to dynamically import state residing in NEM or HM.
2277 *
2278 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
2279 *
2280 * @returns VBox status code.
2281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2282 * @param fExtrnImport The fields to import.
2283 * @thread EMT(pVCpu)
2284 */
2285VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport)
2286{
2287 VMCPU_ASSERT_EMT(pVCpu);
2288 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
2289 {
2290 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
2291 {
2292 case CPUMCTX_EXTRN_KEEPER_NEM:
2293 {
2294 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
2295 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2296 return rc;
2297 }
2298
2299 case CPUMCTX_EXTRN_KEEPER_HM:
2300 {
2301#ifdef IN_RING0
2302 int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
2303 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2304 return rc;
2305#else
2306 AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
2307 return VINF_SUCCESS;
2308#endif
2309 }
2310 default:
2311 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
2312 }
2313 }
2314 return VINF_SUCCESS;
2315}
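
/*
 * A caller-side sketch of the on-demand import above. Real callers use the
 * CPUM_IMPORT_EXTRN_RET() macro mentioned in the function documentation; the
 * hypothetical helper below (not part of the CPUM API) just spells out the
 * same pattern for RIP and RFLAGS.
 */
static int cpumExampleEnsureRipAndRflags(PVMCPUCC pVCpu)
{
    uint64_t const fNeeded = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS;
    if (pVCpu->cpum.s.Guest.fExtrn & fNeeded)
        return CPUMImportGuestStateOnDemand(pVCpu, fNeeded);
    return VINF_SUCCESS;
}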
2316
2317
2318/**
2319 * Gets valid CR4 bits for the guest.
2320 *
2321 * @returns Valid CR4 bits.
2322 * @param pVM The cross context VM structure.
2323 */
2324VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
2325{
2326 PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
2327 uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
2328 | X86_CR4_TSD | X86_CR4_DE
2329 | X86_CR4_PSE | X86_CR4_PAE
2330 | X86_CR4_MCE | X86_CR4_PGE
2331 | X86_CR4_PCE
2332 | X86_CR4_OSXMMEEXCPT; /** @todo r=ramshankar: Introduced in Pentium III along with SSE. Check fSse here? */
2333 if (pGuestFeatures->fFxSaveRstor)
2334 fMask |= X86_CR4_OSFXSR;
2335 if (pGuestFeatures->fVmx)
2336 fMask |= X86_CR4_VMXE;
2337 if (pGuestFeatures->fXSaveRstor)
2338 fMask |= X86_CR4_OSXSAVE;
2339 if (pGuestFeatures->fPcid)
2340 fMask |= X86_CR4_PCIDE;
2341 if (pGuestFeatures->fFsGsBase)
2342 fMask |= X86_CR4_FSGSBASE;
2343 return fMask;
2344}
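
/*
 * A minimal usage sketch for the CR4 mask above. The helper below is
 * hypothetical (not part of the CPUM API): architecturally, an attempt to set
 * any CR4 bit outside the valid mask raises \#GP(0), so a MOV-to-CR4
 * emulation would reject such a value.
 */
static bool cpumExampleIsValidGuestCr4(PVM pVM, uint64_t uNewCr4)
{
    return !(uNewCr4 & ~CPUMGetGuestCR4ValidMask(pVM));
}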
2345
2346
2347/**
2348 * Gets the read and write permission bits for an MSR in an MSR bitmap.
2349 *
2350 * @returns VMXMSRPM_XXX - the MSR permission.
2351 * @param pvMsrBitmap Pointer to the MSR bitmap.
2352 * @param idMsr The MSR to get permissions for.
2353 *
2354 * @sa hmR0VmxSetMsrPermission.
2355 */
2356VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
2357{
2358 AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
2359
2360 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
2361
2362 /*
2363 * MSR Layout:
2364 * Byte index MSR range Interpreted as
2365 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
2366 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
2367 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
2368 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
2369 *
2370 * A bit corresponding to an MSR within the above range causes a VM-exit
2371 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls outside of
2372 * the above ranges, it always causes a VM-exit.
2373 *
2374 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
2375 */
2376 uint32_t const offBitmapRead = 0;
2377 uint32_t const offBitmapWrite = 0x800;
2378 uint32_t offMsr;
2379 uint32_t iBit;
2380 if (idMsr <= UINT32_C(0x00001fff))
2381 {
2382 offMsr = 0;
2383 iBit = idMsr;
2384 }
2385 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
2386 {
2387 offMsr = 0x400;
2388 iBit = idMsr - UINT32_C(0xc0000000);
2389 }
2390 else
2391 {
2392 LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
2393 return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
2394 }
2395
2396 /*
2397 * Get the MSR read permissions.
2398 */
2399 uint32_t fRet;
2400 uint32_t const offMsrRead = offBitmapRead + offMsr;
2401 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
2402 if (ASMBitTest(pbMsrBitmap + offMsrRead, iBit))
2403 fRet = VMXMSRPM_EXIT_RD;
2404 else
2405 fRet = VMXMSRPM_ALLOW_RD;
2406
2407 /*
2408 * Get the MSR write permissions.
2409 */
2410 uint32_t const offMsrWrite = offBitmapWrite + offMsr;
2411 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
2412 if (ASMBitTest(pbMsrBitmap + offMsrWrite, iBit))
2413 fRet |= VMXMSRPM_EXIT_WR;
2414 else
2415 fRet |= VMXMSRPM_ALLOW_WR;
2416
2417 Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
2418 return fRet;
2419}
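
/*
 * A worked example of the bitmap layout above, plus a hypothetical helper
 * (not part of the CPUM API): for MSR 0xc0000080 (MSR_K6_EFER), offMsr is
 * 0x400 and iBit is 0x80, so the read permission is byte 0x410 bit 0 and the
 * write permission is byte 0xc10 bit 0 of the bitmap.
 */
static bool cpumExampleIsEferReadIntercepted(void const *pvMsrBitmap)
{
    return RT_BOOL(CPUMGetVmxMsrPermission(pvMsrBitmap, MSR_K6_EFER) & VMXMSRPM_EXIT_RD);
}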
2420
2421
2422/**
2423 * Gets the permission bits for the specified I/O port from the given I/O bitmaps.
2424 *
2425 * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
2426 * @param pvIoBitmapA Pointer to I/O bitmap A.
2427 * @param pvIoBitmapB Pointer to I/O bitmap B.
2428 * @param uPort The I/O port being accessed.
2429 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2430 */
2431VMM_INT_DECL(bool) CPUMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
2432 uint8_t cbAccess)
2433{
2434 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
2435
2436 /*
2437 * If the I/O port access wraps around the 16-bit port I/O space,
2438 * we must cause a VM-exit.
2439 *
2440 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2441 */
2442 /** @todo r=ramshankar: Reads of 1, 2 and 4 bytes at ports 0xffff, 0xfffe and 0xfffc
2443 * respectively are valid and do not constitute a wrap around from what I
2444 * understand. Verify this later. */
2445 uint32_t const uPortLast = uPort + cbAccess;
2446 if (uPortLast > 0x10000)
2447 return true;
2448
2449 /* Read the appropriate bit from the corresponding IO bitmap. */
2450 void const *pvIoBitmap = uPort < 0x8000 ? pvIoBitmapA : pvIoBitmapB;
2451 return ASMBitTest(pvIoBitmap, uPort);
2452}
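
/*
 * Worked examples for the checks above: a 2-byte access at port 0xffff yields
 * uPortLast = 0x10001, which exceeds 0x10000 and thus always causes a VM-exit,
 * while a 1-byte access to port 0x60 consults bit 0x60 of I/O bitmap A (the
 * bitmap covering ports 0x0000 through 0x7fff).
 */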
2453
2454
2455/**
2456 * Returns whether the given VMCS field is valid and supported for the guest.
2457 *
2458 * @param pVM The cross context VM structure.
2459 * @param u64VmcsField The VMCS field.
2460 *
2461 * @remarks This takes into account the CPU features exposed to the guest.
2462 */
2463VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField)
2464{
2465 uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
2466 uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
2467 if (!uFieldEncHi)
2468 { /* likely */ }
2469 else
2470 return false;
2471
2472 PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
2473 switch (uFieldEncLo)
2474 {
2475 /*
2476 * 16-bit fields.
2477 */
2478 /* Control fields. */
2479 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
2480 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
2481 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
2482
2483 /* Guest-state fields. */
2484 case VMX_VMCS16_GUEST_ES_SEL:
2485 case VMX_VMCS16_GUEST_CS_SEL:
2486 case VMX_VMCS16_GUEST_SS_SEL:
2487 case VMX_VMCS16_GUEST_DS_SEL:
2488 case VMX_VMCS16_GUEST_FS_SEL:
2489 case VMX_VMCS16_GUEST_GS_SEL:
2490 case VMX_VMCS16_GUEST_LDTR_SEL:
2491 case VMX_VMCS16_GUEST_TR_SEL: return true;
2492 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
2493 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
2494
2495 /* Host-state fields. */
2496 case VMX_VMCS16_HOST_ES_SEL:
2497 case VMX_VMCS16_HOST_CS_SEL:
2498 case VMX_VMCS16_HOST_SS_SEL:
2499 case VMX_VMCS16_HOST_DS_SEL:
2500 case VMX_VMCS16_HOST_FS_SEL:
2501 case VMX_VMCS16_HOST_GS_SEL:
2502 case VMX_VMCS16_HOST_TR_SEL: return true;
2503
2504 /*
2505 * 64-bit fields.
2506 */
2507 /* Control fields. */
2508 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
2509 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
2510 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
2511 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
2512 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
2513 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
2514 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
2515 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
2516 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
2517 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
2518 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
2519 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
2520 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
2521 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
2522 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
2523 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
2524 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
2525 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
2526 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
2527 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
2528 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
2529 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
2530 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
2531 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
2532 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
2533 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
2534 case VMX_VMCS64_CTRL_EPTP_FULL:
2535 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
2536 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
2537 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
2538 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
2539 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
2540 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
2541 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
2542 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
2543 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
2544 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
2545 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
2546 {
2547 PCVMCPU pVCpu = pVM->CTX_SUFF(apCpus)[0];
2548 uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
2549 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
2550 }
2551 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
2552 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
2553 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
2554 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
2555 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
2556 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
2557 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
2558 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
2559 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
2560 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
2561 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
2562 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
2563
2564 /* Read-only data fields. */
2565 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
2566 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
2567
2568 /* Guest-state fields. */
2569 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
2570 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
2571 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
2572 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
2573 case VMX_VMCS64_GUEST_PAT_FULL:
2574 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
2575 case VMX_VMCS64_GUEST_EFER_FULL:
2576 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
2577 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
2578 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
2579 case VMX_VMCS64_GUEST_PDPTE0_FULL:
2580 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
2581 case VMX_VMCS64_GUEST_PDPTE1_FULL:
2582 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
2583 case VMX_VMCS64_GUEST_PDPTE2_FULL:
2584 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
2585 case VMX_VMCS64_GUEST_PDPTE3_FULL:
2586 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
2587 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
2588 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
2589
2590 /* Host-state fields. */
2591 case VMX_VMCS64_HOST_PAT_FULL:
2592 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
2593 case VMX_VMCS64_HOST_EFER_FULL:
2594 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
2595 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
2596 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
2597
2598 /*
2599 * 32-bit fields.
2600 */
2601 /* Control fields. */
2602 case VMX_VMCS32_CTRL_PIN_EXEC:
2603 case VMX_VMCS32_CTRL_PROC_EXEC:
2604 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
2605 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
2606 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
2607 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
2608 case VMX_VMCS32_CTRL_EXIT:
2609 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
2610 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
2611 case VMX_VMCS32_CTRL_ENTRY:
2612 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
2613 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
2614 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
2615 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
2616 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
2617 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
2618 case VMX_VMCS32_CTRL_PLE_GAP:
2619 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
2620
2621 /* Read-only data fields. */
2622 case VMX_VMCS32_RO_VM_INSTR_ERROR:
2623 case VMX_VMCS32_RO_EXIT_REASON:
2624 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
2625 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
2626 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
2627 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
2628 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
2629 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
2630
2631 /* Guest-state fields. */
2632 case VMX_VMCS32_GUEST_ES_LIMIT:
2633 case VMX_VMCS32_GUEST_CS_LIMIT:
2634 case VMX_VMCS32_GUEST_SS_LIMIT:
2635 case VMX_VMCS32_GUEST_DS_LIMIT:
2636 case VMX_VMCS32_GUEST_FS_LIMIT:
2637 case VMX_VMCS32_GUEST_GS_LIMIT:
2638 case VMX_VMCS32_GUEST_LDTR_LIMIT:
2639 case VMX_VMCS32_GUEST_TR_LIMIT:
2640 case VMX_VMCS32_GUEST_GDTR_LIMIT:
2641 case VMX_VMCS32_GUEST_IDTR_LIMIT:
2642 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
2643 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
2644 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
2645 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
2646 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
2647 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
2648 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
2649 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
2650 case VMX_VMCS32_GUEST_INT_STATE:
2651 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
2652 case VMX_VMCS32_GUEST_SMBASE:
2653 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
2654 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
2655
2656 /* Host-state fields. */
2657 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
2658
2659 /*
2660 * Natural-width fields.
2661 */
2662 /* Control fields. */
2663 case VMX_VMCS_CTRL_CR0_MASK:
2664 case VMX_VMCS_CTRL_CR4_MASK:
2665 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
2666 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
2667 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
2668 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
2669 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
2670 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
2671
2672 /* Read-only data fields. */
2673 case VMX_VMCS_RO_EXIT_QUALIFICATION:
2674 case VMX_VMCS_RO_IO_RCX:
2675 case VMX_VMCS_RO_IO_RSI:
2676 case VMX_VMCS_RO_IO_RDI:
2677 case VMX_VMCS_RO_IO_RIP:
2678 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
2679
2680 /* Guest-state fields. */
2681 case VMX_VMCS_GUEST_CR0:
2682 case VMX_VMCS_GUEST_CR3:
2683 case VMX_VMCS_GUEST_CR4:
2684 case VMX_VMCS_GUEST_ES_BASE:
2685 case VMX_VMCS_GUEST_CS_BASE:
2686 case VMX_VMCS_GUEST_SS_BASE:
2687 case VMX_VMCS_GUEST_DS_BASE:
2688 case VMX_VMCS_GUEST_FS_BASE:
2689 case VMX_VMCS_GUEST_GS_BASE:
2690 case VMX_VMCS_GUEST_LDTR_BASE:
2691 case VMX_VMCS_GUEST_TR_BASE:
2692 case VMX_VMCS_GUEST_GDTR_BASE:
2693 case VMX_VMCS_GUEST_IDTR_BASE:
2694 case VMX_VMCS_GUEST_DR7:
2695 case VMX_VMCS_GUEST_RSP:
2696 case VMX_VMCS_GUEST_RIP:
2697 case VMX_VMCS_GUEST_RFLAGS:
2698 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
2699 case VMX_VMCS_GUEST_SYSENTER_ESP:
2700 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
2701
2702 /* Host-state fields. */
2703 case VMX_VMCS_HOST_CR0:
2704 case VMX_VMCS_HOST_CR3:
2705 case VMX_VMCS_HOST_CR4:
2706 case VMX_VMCS_HOST_FS_BASE:
2707 case VMX_VMCS_HOST_GS_BASE:
2708 case VMX_VMCS_HOST_TR_BASE:
2709 case VMX_VMCS_HOST_GDTR_BASE:
2710 case VMX_VMCS_HOST_IDTR_BASE:
2711 case VMX_VMCS_HOST_SYSENTER_ESP:
2712 case VMX_VMCS_HOST_SYSENTER_EIP:
2713 case VMX_VMCS_HOST_RSP:
2714 case VMX_VMCS_HOST_RIP: return true;
2715 }
2716
2717 return false;
2718}
2719
2720
2721/**
2722 * Checks whether the given I/O access should cause a nested-guest VM-exit.
2723 *
2724 * @returns @c true if it causes a VM-exit, @c false otherwise.
2725 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2726 * @param u16Port The I/O port being accessed.
2727 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2728 */
2729VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
2730{
2731 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2732 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
2733 return true;
2734
2735 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
2736 {
2737 uint8_t const *pbIoBitmapA = (uint8_t const *)pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap);
2738 uint8_t const *pbIoBitmapB = (uint8_t const *)pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap) + VMX_V_IO_BITMAP_A_SIZE;
2739 Assert(pbIoBitmapA);
2740 Assert(pbIoBitmapB);
2741 return CPUMGetVmxIoBitmapPermission(pbIoBitmapA, pbIoBitmapB, u16Port, cbAccess);
2742 }
2743
2744 return false;
2745}
2746
2747
2748/**
2749 * Checks whether the Mov-to-CR3 instruction causes a nested-guest VM-exit.
2750 *
2751 * @returns @c true if it causes a VM-exit, @c false otherwise.
2752 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2753 * @param uNewCr3 The CR3 value being written.
2754 */
2755VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
2756{
2757 /*
2758 * If the CR3-load exiting control is set and the new CR3 value does not
2759 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
2760 *
2761 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2762 */
2763 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2764 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2765 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
2766 {
2767 uint32_t const uCr3TargetCount = pVmcs->u32Cr3TargetCount;
2768 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
2769
2770 /* If the CR3-target count is 0, cause a VM-exit. */
2771 if (uCr3TargetCount == 0)
2772 return true;
2773
2774 /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
2775 AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
2776 if ( uNewCr3 != pVmcs->u64Cr3Target0.u
2777 && uNewCr3 != pVmcs->u64Cr3Target1.u
2778 && uNewCr3 != pVmcs->u64Cr3Target2.u
2779 && uNewCr3 != pVmcs->u64Cr3Target3.u)
2780 return true;
2781 }
2782 return false;
2783}
2784
2785
2786/**
2787 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
2788 * VM-exit or not.
2789 *
2790 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
2791 * @param pVCpu The cross context virtual CPU structure.
2792 * @param uExitReason The VM-exit reason (VMX_EXIT_VMREAD or
2793 * VMX_EXIT_VMWRITE).
2794 * @param u64VmcsField The VMCS field.
2795 */
2796VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
2797{
2798 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
2799 Assert( uExitReason == VMX_EXIT_VMREAD
2800 || uExitReason == VMX_EXIT_VMWRITE);
2801
2802 /*
2803 * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
2804 */
2805 if (!CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VMCS_SHADOWING))
2806 return true;
2807
2808 /*
2809 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
2810 * is intercepted. This excludes any reserved bits in the valid parts of the field
2811 * encoding (i.e. bit 12).
2812 */
2813 if (u64VmcsField & VMX_VMCSFIELD_RSVD_MASK)
2814 return true;
2815
2816 /*
2817 * Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not.
2818 */
2819 uint32_t const u32VmcsField = RT_LO_U32(u64VmcsField);
2820 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
2821 ? (uint8_t const *)pVCpu->cpum.s.Guest.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
2822 : (uint8_t const *)pVCpu->cpum.s.Guest.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
2823 Assert(pbBitmap);
2824 Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
2825 return ASMBitTest(pbBitmap + (u32VmcsField >> 3), u32VmcsField & 7);
2826}
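
/*
 * A worked example of the bitmap lookup above: for the guest RIP field
 * (encoding 0x681e) the byte offset is 0x681e >> 3 = 0xd03 and the bit within
 * that byte is 0x681e & 7 = 6; if that bit is set in the VMREAD or VMWRITE
 * bitmap, the corresponding instruction causes a VM-exit.
 */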
2827
2828
2829
2830/**
2831 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
2832 *
2833 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
2834 * @param u16Port The IO port being accessed.
2835 * @param enmIoType The type of IO access.
2836 * @param cbReg The IO operand size in bytes.
2837 * @param cAddrSizeBits The address size bits (for 16, 32 or 64).
2838 * @param iEffSeg The effective segment number.
2839 * @param fRep Whether this is a repeating IO instruction (REP prefix).
2840 * @param fStrIo Whether this is a string IO instruction.
2841 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO struct to be filled.
2842 * Optional, can be NULL.
2843 */
2844VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
2845 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
2846 PSVMIOIOEXITINFO pIoExitInfo)
2847{
2848 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
2849 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
2850
2851 /*
2852 * The IOPM layout:
2853 * Each bit represents one 8-bit port. That makes a total of 65536 bits, or
2854 * two 4K pages.
2855 *
2856 * For IO instructions that access more than a single byte, the permission bits
2857 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
2858 *
2859 * Since it's possible to do a 32-bit IO access at port 65535 (accessing 4 bytes),
2860 * we need 3 extra bits beyond the second 4K page.
2861 */
2862 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
2863
2864 uint16_t const offIopm = u16Port >> 3;
2865 uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
2866 uint8_t const cShift = u16Port - (offIopm << 3);
2867 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
2868
2869 uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
2870 Assert(pbIopm);
2871 pbIopm += offIopm;
2872 uint16_t const u16Iopm = *(uint16_t *)pbIopm;
2873 if (u16Iopm & fIopmMask)
2874 {
2875 if (pIoExitInfo)
2876 {
2877 static const uint32_t s_auIoOpSize[] =
2878 { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
2879
2880 static const uint32_t s_auIoAddrSize[] =
2881 { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
2882
2883 pIoExitInfo->u = s_auIoOpSize[cbReg & 7];
2884 pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
2885 pIoExitInfo->n.u1Str = fStrIo;
2886 pIoExitInfo->n.u1Rep = fRep;
2887 pIoExitInfo->n.u3Seg = iEffSeg & 7;
2888 pIoExitInfo->n.u1Type = enmIoType;
2889 pIoExitInfo->n.u16Port = u16Port;
2890 }
2891 return true;
2892 }
2893
2894 /** @todo remove later (for debugging as VirtualBox always traps all IO
2895 * intercepts). */
2896 AssertMsgFailed(("CPUMIsSvmIoInterceptSet: We expect an IO intercept here!\n"));
2897 return false;
2898}
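
/*
 * A worked example of the IOPM indexing above: an access to port 0x64 lands
 * in IOPM byte 0x64 >> 3 = 0x0c at bit position 0x64 - (0x0c << 3) = 4, and
 * fIopmMask ORs in further bits at that position so that a multi-byte access
 * is intercepted if any of the bytes it touches has its permission bit set.
 */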
2899
2900
2901/**
2902 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
2903 *
2904 * @returns VBox status code.
2905 * @param idMsr The MSR being requested.
2906 * @param pbOffMsrpm Where to store the byte offset in the MSR permission
2907 * bitmap for @a idMsr.
2908 * @param puMsrpmBit Where to store the bit offset starting at the byte
2909 * returned in @a pbOffMsrpm.
2910 */
2911VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
2912{
2913 Assert(pbOffMsrpm);
2914 Assert(puMsrpmBit);
2915
2916 /*
2917 * MSRPM Layout:
2918 * Byte offset MSR range
2919 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
2920 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
2921 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
2922 * 0x1800 - 0x1fff Reserved
2923 *
2924 * Each MSR is represented by 2 permission bits (read and write).
2925 */
2926 if (idMsr <= 0x00001fff)
2927 {
2928 /* Pentium-compatible MSRs. */
2929 uint32_t const bitoffMsr = idMsr << 1;
2930 *pbOffMsrpm = bitoffMsr >> 3;
2931 *puMsrpmBit = bitoffMsr & 7;
2932 return VINF_SUCCESS;
2933 }
2934
2935 if ( idMsr >= 0xc0000000
2936 && idMsr <= 0xc0001fff)
2937 {
2938 /* AMD Sixth Generation x86 Processor MSRs. */
2939 uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
2940 *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
2941 *puMsrpmBit = bitoffMsr & 7;
2942 return VINF_SUCCESS;
2943 }
2944
2945 if ( idMsr >= 0xc0010000
2946 && idMsr <= 0xc0011fff)
2947 {
2948 /* AMD Seventh and Eighth Generation Processor MSRs. */
2949 uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
2950 *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
2951 *puMsrpmBit = bitoffMsr & 7;
2952 return VINF_SUCCESS;
2953 }
2954
2955 *pbOffMsrpm = 0;
2956 *puMsrpmBit = 0;
2957 return VERR_OUT_OF_RANGE;
2958}
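
/*
 * A worked example of the MSRPM layout above, plus a hypothetical helper (not
 * part of the CPUM API): for MSR 0xc0000080 (MSR_K6_EFER) the bit offset is
 * 0x80 * 2 = 0x100, giving byte 0x800 + 0x20 = 0x820 with the read-intercept
 * bit at position 0 and, by SVM convention, the write-intercept bit right
 * after it at position 1.
 */
static bool cpumExampleIsEferWriteIntercepted(uint8_t const *pbMsrpm)
{
    uint16_t offMsrpm;
    uint8_t  uMsrpmBit;
    int const rc = CPUMGetSvmMsrpmOffsetAndBit(MSR_K6_EFER, &offMsrpm, &uMsrpmBit);
    AssertRCReturn(rc, true /* be safe: treat unknown MSRs as intercepted */);
    return RT_BOOL(pbMsrpm[offMsrpm] & RT_BIT(uMsrpmBit + 1));
}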
2959