VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@80020

Last change on this file since 80020 was 80020, checked in by vboxsync, 5 years ago

VMM: Kicking out raw-mode (work in progress) - vm.h. bugref:9517

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 118.9 KB
1/* $Id: CPUMAllRegs.cpp 80020 2019-07-26 18:49:57Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/dbgf.h>
25#include <VBox/vmm/apic.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#ifndef IN_RC
30# include <VBox/vmm/nem.h>
31# include <VBox/vmm/hm.h>
32#endif
33#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
34# include <VBox/vmm/selm.h>
35#endif
36#include "CPUMInternal.h"
37#include <VBox/vmm/vm.h>
38#include <VBox/err.h>
39#include <VBox/dis.h>
40#include <VBox/log.h>
41#include <VBox/vmm/hm.h>
42#include <VBox/vmm/tm.h>
43#include <iprt/assert.h>
44#include <iprt/asm.h>
45#include <iprt/asm-amd64-x86.h>
46#ifdef IN_RING3
47# include <iprt/thread.h>
48#endif
49
50/** Disable stack frame pointer generation here. */
51#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
52# pragma optimize("y", off)
53#endif
54
55AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
56AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
57
58
59/*********************************************************************************************************************************
60* Defined Constants And Macros *
61*********************************************************************************************************************************/
62/**
63 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
64 *
65 * @returns Pointer to the Virtual CPU.
66 * @param a_pGuestCtx Pointer to the guest context.
67 */
68#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
69
70/**
71 * Lazily loads the hidden parts of a selector register when using raw-mode.
72 */
73#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
74# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
75 do \
76 { \
77 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
78 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
79 } while (0)
80#else
81# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
82 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
83#endif
84
85/** @def CPUM_INT_ASSERT_NOT_EXTRN
86 * Macro for asserting that @a a_fNotExtrn are present.
87 *
88 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
89 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
90 */
91#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
92 AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
93 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
94
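
/**
 * Usage sketch for CPUM_INT_ASSERT_NOT_EXTRN (illustrative only; the helper
 * name is invented for the example). A getter asserts that the state it is
 * about to read has already been imported, i.e. that the corresponding
 * CPUMCTX_EXTRN_XXX bits are clear in CPUMCTX::fExtrn, exactly like the
 * getters further down in this file do.
 *
 * @code
 *     static bool exampleIsGuestPagingAndPaeOn(PCVMCPU pVCpu)
 *     {
 *         CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
 *         return (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
 *             && (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE);
 *     }
 * @endcode
 */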
95
96
97
98#ifdef VBOX_WITH_RAW_MODE_NOT_R0
99
100/**
101 * Does the lazy hidden selector register loading.
102 *
103 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
104 * @param pSReg The selector register to lazily load hidden parts of.
105 */
106static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
107{
108 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
109 Assert(VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)));
110 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
111
112 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
113 {
114 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
115 pSReg->Attr.u = 0;
116 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
117 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
118 pSReg->Attr.n.u2Dpl = 3;
119 pSReg->Attr.n.u1Present = 1;
120 pSReg->u32Limit = 0x0000ffff;
121 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
122 pSReg->ValidSel = pSReg->Sel;
123 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
124 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
125 }
126 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
127 {
128 /* Real mode - leave the limit and flags alone here, at least for now. */
129 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
130 pSReg->ValidSel = pSReg->Sel;
131 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
132 }
133 else
134 {
135 /* Protected mode - get it from the selector descriptor tables. */
136 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
137 {
138 Assert(!CPUMIsGuestInLongMode(pVCpu));
139 pSReg->Sel = 0;
140 pSReg->u64Base = 0;
141 pSReg->u32Limit = 0;
142 pSReg->Attr.u = 0;
143 pSReg->ValidSel = 0;
144 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
145 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
146 }
147 else
148 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
149 }
150}
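
/**
 * Worked example for the real-mode/V8086 paths above (illustrative values
 * only): the hidden base is simply the selector value shifted left by four
 * bits.
 *
 * @code
 *     // pSReg->Sel == 0x1234
 *     // => pSReg->u64Base == (uint32_t)0x1234 << 4 == 0x12340
 *     // V8086 mode additionally forces u32Limit to 0x0000ffff and DPL to 3.
 * @endcode
 */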
151
152
153/**
154 * Makes sure the hidden CS and SS selector registers are valid, loading them if
155 * necessary.
156 *
157 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
158 */
159VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
160{
161 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
162 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
163}
164
165
166/**
167 * Loads the hidden parts of a selector register.
168 *
169 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
170 * @param pSReg The selector register to lazily load hidden parts of.
171 */
172VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
173{
174 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
175}
176
177#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
178
179
180/**
181 * Obsolete.
182 *
183 * We don't support nested hypervisor context interrupts or traps. Life is much
184 * simpler when we don't. It's also slightly faster at times.
185 *
186 * @param pVCpu The cross context virtual CPU structure.
187 */
188VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
189{
190 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
191}
192
193
194/**
195 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
196 *
197 * @param pVCpu The cross context virtual CPU structure.
198 */
199VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
200{
201 return &pVCpu->cpum.s.Hyper;
202}
203
204
205VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
206{
207 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
208 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
209}
210
211
212VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
213{
214 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
215 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
216}
217
218
219VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
220{
221 pVCpu->cpum.s.Hyper.cr3 = cr3;
222
223#ifdef IN_RC
224 /* Update the current CR3. */
225 ASMSetCR3(cr3);
226#endif
227}
228
229VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
230{
231 return pVCpu->cpum.s.Hyper.cr3;
232}
233
234
235VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
236{
237 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
238}
239
240
241VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
242{
243 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
244}
245
246
247VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
248{
249 pVCpu->cpum.s.Hyper.es.Sel = SelES;
250}
251
252
253VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
254{
255 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
256}
257
258
259VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
260{
261 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
262}
263
264
265VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
266{
267 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
268}
269
270
271VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
272{
273 pVCpu->cpum.s.Hyper.esp = u32ESP;
274}
275
276
277VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
278{
279 pVCpu->cpum.s.Hyper.edx = u32EDX;
280}
281
282
283VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
284{
285 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
286 return VINF_SUCCESS;
287}
288
289
290VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
291{
292 pVCpu->cpum.s.Hyper.eip = u32EIP;
293}
294
295
296/**
297 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
298 * EFLAGS and EIP prior to resuming guest execution.
299 *
300 * All general registers not given as parameters will be set to 0. The EFLAGS
301 * register will be set to sane values for C/C++ code execution with interrupts
302 * disabled and IOPL 0.
303 *
304 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
305 * @param u32EIP The EIP value.
306 * @param u32ESP The ESP value.
307 * @param u32EAX The EAX value.
308 * @param u32EDX The EDX value.
309 */
310VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
311{
312 pVCpu->cpum.s.Hyper.eip = u32EIP;
313 pVCpu->cpum.s.Hyper.esp = u32ESP;
314 pVCpu->cpum.s.Hyper.eax = u32EAX;
315 pVCpu->cpum.s.Hyper.edx = u32EDX;
316 pVCpu->cpum.s.Hyper.ecx = 0;
317 pVCpu->cpum.s.Hyper.ebx = 0;
318 pVCpu->cpum.s.Hyper.ebp = 0;
319 pVCpu->cpum.s.Hyper.esi = 0;
320 pVCpu->cpum.s.Hyper.edi = 0;
321 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
322}
323
324
325VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
326{
327 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
328}
329
330
331VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
332{
333 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
334}
335
336
337/** @def MAYBE_LOAD_DRx
338 * Macro for updating DRx values in raw-mode and ring-0 contexts.
339 */
340#ifdef IN_RING0
341# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
342# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
343 do { \
344 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
345 a_fnLoad(a_uValue); \
346 else \
347 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
348 } while (0)
349# else
350# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
351 do { \
352 a_fnLoad(a_uValue); \
353 } while (0)
354# endif
355
356#elif defined(IN_RC)
357# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
358 do { \
359 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
360 { a_fnLoad(a_uValue); } \
361 } while (0)
362
363#else
364# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
365#endif
366
367VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
368{
369 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
370 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
371}
372
373
374VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
375{
376 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
377 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
378}
379
380
381VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
382{
383 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
384 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
385}
386
387
388VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
389{
390 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
391 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
392}
393
394
395VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
396{
397 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
398}
399
400
401VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
402{
403 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
404#ifdef IN_RC
405 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
406#endif
407}
408
409
410VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
411{
412 return pVCpu->cpum.s.Hyper.cs.Sel;
413}
414
415
416VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
417{
418 return pVCpu->cpum.s.Hyper.ds.Sel;
419}
420
421
422VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
423{
424 return pVCpu->cpum.s.Hyper.es.Sel;
425}
426
427
428VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
429{
430 return pVCpu->cpum.s.Hyper.fs.Sel;
431}
432
433
434VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
435{
436 return pVCpu->cpum.s.Hyper.gs.Sel;
437}
438
439
440VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
441{
442 return pVCpu->cpum.s.Hyper.ss.Sel;
443}
444
445
446VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
447{
448 return pVCpu->cpum.s.Hyper.eax;
449}
450
451
452VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
453{
454 return pVCpu->cpum.s.Hyper.ebx;
455}
456
457
458VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
459{
460 return pVCpu->cpum.s.Hyper.ecx;
461}
462
463
464VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
465{
466 return pVCpu->cpum.s.Hyper.edx;
467}
468
469
470VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
471{
472 return pVCpu->cpum.s.Hyper.esi;
473}
474
475
476VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
477{
478 return pVCpu->cpum.s.Hyper.edi;
479}
480
481
482VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
483{
484 return pVCpu->cpum.s.Hyper.ebp;
485}
486
487
488VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
489{
490 return pVCpu->cpum.s.Hyper.esp;
491}
492
493
494VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
495{
496 return pVCpu->cpum.s.Hyper.eflags.u32;
497}
498
499
500VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
501{
502 return pVCpu->cpum.s.Hyper.eip;
503}
504
505
506VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
507{
508 return pVCpu->cpum.s.Hyper.rip;
509}
510
511
512VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
513{
514 if (pcbLimit)
515 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
516 return pVCpu->cpum.s.Hyper.idtr.pIdt;
517}
518
519
520VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
521{
522 if (pcbLimit)
523 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
524 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
525}
526
527
528VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
529{
530 return pVCpu->cpum.s.Hyper.ldtr.Sel;
531}
532
533
534VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
535{
536 return pVCpu->cpum.s.Hyper.dr[0];
537}
538
539
540VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
541{
542 return pVCpu->cpum.s.Hyper.dr[1];
543}
544
545
546VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
547{
548 return pVCpu->cpum.s.Hyper.dr[2];
549}
550
551
552VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
553{
554 return pVCpu->cpum.s.Hyper.dr[3];
555}
556
557
558VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
559{
560 return pVCpu->cpum.s.Hyper.dr[6];
561}
562
563
564VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
565{
566 return pVCpu->cpum.s.Hyper.dr[7];
567}
568
569
570/**
571 * Gets the pointer to the internal CPUMCTXCORE structure.
572 * This is only for reading in order to save a few calls.
573 *
574 * @param pVCpu The cross context virtual CPU structure.
575 */
576VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
577{
578 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
579}
580
581
582/**
583 * Queries the pointer to the internal CPUMCTX structure.
584 *
585 * @returns The CPUMCTX pointer.
586 * @param pVCpu The cross context virtual CPU structure.
587 */
588VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
589{
590 return &pVCpu->cpum.s.Guest;
591}
592
593
594/**
595 * Queries the pointer to the internal CPUMCTXMSRS structure.
596 *
597 * This is for NEM only.
598 *
599 * @returns The CPUMCTX pointer.
600 * @param pVCpu The cross context virtual CPU structure.
601 */
602VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
603{
604 return &pVCpu->cpum.s.GuestMsrs;
605}
606
607
608VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
609{
610 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
611 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
612 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
613 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
614 return VINF_SUCCESS; /* formality, consider it void. */
615}
616
617
618VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
619{
620 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
621 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
622 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
623 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
624 return VINF_SUCCESS; /* formality, consider it void. */
625}
626
627
628VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
629{
630 pVCpu->cpum.s.Guest.tr.Sel = tr;
631 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
632 return VINF_SUCCESS; /* formality, consider it void. */
633}
634
635
636VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
637{
638 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
639 /* The caller will set more hidden bits if it has them. */
640 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
641 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
642 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
643 return VINF_SUCCESS; /* formality, consider it void. */
644}
645
646
647/**
648 * Set the guest CR0.
649 *
650 * When called in GC, the hyper CR0 may be updated if that is
651 * required. The caller only has to take special action if AM,
652 * WP, PG or PE changes.
653 *
654 * @returns VINF_SUCCESS (consider it void).
655 * @param pVCpu The cross context virtual CPU structure.
656 * @param cr0 The new CR0 value.
657 */
658VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
659{
660#ifdef IN_RC
661 /*
662 * Check if we need to change hypervisor CR0 because
663 * of math stuff.
664 */
665 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
666 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
667 {
668 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
669 {
670 /*
671 * We haven't loaded the guest FPU state yet, so TS and MT are both set
672 * and EM should be reflecting the guest EM (it always does this).
673 */
674 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
675 {
676 uint32_t HyperCR0 = ASMGetCR0();
677 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
678 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
679 HyperCR0 &= ~X86_CR0_EM;
680 HyperCR0 |= cr0 & X86_CR0_EM;
681 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
682 ASMSetCR0(HyperCR0);
683 }
684# ifdef VBOX_STRICT
685 else
686 {
687 uint32_t HyperCR0 = ASMGetCR0();
688 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
689 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
690 }
691# endif
692 }
693 else
694 {
695 /*
696 * Already loaded the guest FPU state, so we're just mirroring
697 * the guest flags.
698 */
699 uint32_t HyperCR0 = ASMGetCR0();
700 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
701 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
702 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
703 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
704 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
705 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
706 ASMSetCR0(HyperCR0);
707 }
708 }
709#endif /* IN_RC */
710
711 /*
712 * Check for changes causing TLB flushes (for REM).
713 * The caller is responsible for calling PGM when appropriate.
714 */
715 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
716 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
717 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
718 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
719
720 /*
721 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
722 */
723 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
724 PGMCr0WpEnabled(pVCpu);
725
726 /* The ET flag is settable on a 386 and hardwired on 486+. */
727 if ( !(cr0 & X86_CR0_ET)
728 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
729 cr0 |= X86_CR0_ET;
730
731 pVCpu->cpum.s.Guest.cr0 = cr0;
732 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
733 return VINF_SUCCESS;
734}
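
/**
 * Caller sketch for CPUMSetGuestCR0 (hypothetical; the helper name is made
 * up for the example): after the call, the CPUM_CHANGED_XXX flags indicate
 * whether a TLB-flushing transition (PG/WP/PE) occurred; informing PGM
 * remains the caller's job, as noted above.
 *
 * @code
 *     static void exampleWriteGuestCr0(PVMCPU pVCpu, uint64_t uNewCr0)
 *     {
 *         CPUMSetGuestCR0(pVCpu, uNewCr0);
 *         if (pVCpu->cpum.s.fChanged & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
 *         {
 *             // PG, WP or PE changed - the caller must notify PGM here.
 *         }
 *     }
 * @endcode
 */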
735
736
737VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
738{
739 pVCpu->cpum.s.Guest.cr2 = cr2;
740 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
741 return VINF_SUCCESS;
742}
743
744
745VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
746{
747 pVCpu->cpum.s.Guest.cr3 = cr3;
748 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
749 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
750 return VINF_SUCCESS;
751}
752
753
754VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
755{
756 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
757
758 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
759 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
760 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
761
762 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
763 pVCpu->cpum.s.Guest.cr4 = cr4;
764 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
765 return VINF_SUCCESS;
766}
767
768
769VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
770{
771 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
772 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
773 return VINF_SUCCESS;
774}
775
776
777VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
778{
779 pVCpu->cpum.s.Guest.eip = eip;
780 return VINF_SUCCESS;
781}
782
783
784VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
785{
786 pVCpu->cpum.s.Guest.eax = eax;
787 return VINF_SUCCESS;
788}
789
790
791VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
792{
793 pVCpu->cpum.s.Guest.ebx = ebx;
794 return VINF_SUCCESS;
795}
796
797
798VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
799{
800 pVCpu->cpum.s.Guest.ecx = ecx;
801 return VINF_SUCCESS;
802}
803
804
805VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
806{
807 pVCpu->cpum.s.Guest.edx = edx;
808 return VINF_SUCCESS;
809}
810
811
812VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
813{
814 pVCpu->cpum.s.Guest.esp = esp;
815 return VINF_SUCCESS;
816}
817
818
819VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
820{
821 pVCpu->cpum.s.Guest.ebp = ebp;
822 return VINF_SUCCESS;
823}
824
825
826VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
827{
828 pVCpu->cpum.s.Guest.esi = esi;
829 return VINF_SUCCESS;
830}
831
832
833VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
834{
835 pVCpu->cpum.s.Guest.edi = edi;
836 return VINF_SUCCESS;
837}
838
839
840VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
841{
842 pVCpu->cpum.s.Guest.ss.Sel = ss;
843 return VINF_SUCCESS;
844}
845
846
847VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
848{
849 pVCpu->cpum.s.Guest.cs.Sel = cs;
850 return VINF_SUCCESS;
851}
852
853
854VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
855{
856 pVCpu->cpum.s.Guest.ds.Sel = ds;
857 return VINF_SUCCESS;
858}
859
860
861VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
862{
863 pVCpu->cpum.s.Guest.es.Sel = es;
864 return VINF_SUCCESS;
865}
866
867
868VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
869{
870 pVCpu->cpum.s.Guest.fs.Sel = fs;
871 return VINF_SUCCESS;
872}
873
874
875VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
876{
877 pVCpu->cpum.s.Guest.gs.Sel = gs;
878 return VINF_SUCCESS;
879}
880
881
882VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
883{
884 pVCpu->cpum.s.Guest.msrEFER = val;
885 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
886}
887
888
889VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
890{
891 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
892 if (pcbLimit)
893 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
894 return pVCpu->cpum.s.Guest.idtr.pIdt;
895}
896
897
898VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
899{
900 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
901 if (pHidden)
902 *pHidden = pVCpu->cpum.s.Guest.tr;
903 return pVCpu->cpum.s.Guest.tr.Sel;
904}
905
906
907VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
908{
909 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
910 return pVCpu->cpum.s.Guest.cs.Sel;
911}
912
913
914VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
915{
916 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
917 return pVCpu->cpum.s.Guest.ds.Sel;
918}
919
920
921VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
922{
923 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
924 return pVCpu->cpum.s.Guest.es.Sel;
925}
926
927
928VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
929{
930 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
931 return pVCpu->cpum.s.Guest.fs.Sel;
932}
933
934
935VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
936{
937 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
938 return pVCpu->cpum.s.Guest.gs.Sel;
939}
940
941
942VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
943{
944 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
945 return pVCpu->cpum.s.Guest.ss.Sel;
946}
947
948
949VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
950{
951 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
952 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
953 if ( !CPUMIsGuestInLongMode(pVCpu)
954 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
955 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
956 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
957}
958
959
960VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
961{
962 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
963 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
964 if ( !CPUMIsGuestInLongMode(pVCpu)
965 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
966 return pVCpu->cpum.s.Guest.esp + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
967 return pVCpu->cpum.s.Guest.rsp + pVCpu->cpum.s.Guest.ss.u64Base;
968}
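
/**
 * Worked example for the two flat-address getters above (illustrative values
 * only): outside 64-bit code the flat address is the 32-bit register plus the
 * truncated segment base; in 64-bit code the full 64-bit base and RIP/RSP are
 * used (the base is normally zero except for FS/GS).
 *
 * @code
 *     // 16/32-bit code: cs.u64Base == 0x10000, eip == 0x200  =>  flat PC == 0x10200
 *     // 64-bit code:    flat PC == rip + cs.u64Base
 * @endcode
 */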
969
970
971VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
972{
973 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
974 return pVCpu->cpum.s.Guest.ldtr.Sel;
975}
976
977
978VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
979{
980 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
981 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
982 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
983 return pVCpu->cpum.s.Guest.ldtr.Sel;
984}
985
986
987VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
988{
989 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
990 return pVCpu->cpum.s.Guest.cr0;
991}
992
993
994VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
995{
996 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
997 return pVCpu->cpum.s.Guest.cr2;
998}
999
1000
1001VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
1002{
1003 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
1004 return pVCpu->cpum.s.Guest.cr3;
1005}
1006
1007
1008VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
1009{
1010 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1011 return pVCpu->cpum.s.Guest.cr4;
1012}
1013
1014
1015VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPU pVCpu)
1016{
1017 uint64_t u64;
1018 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1019 if (RT_FAILURE(rc))
1020 u64 = 0;
1021 return u64;
1022}
1023
1024
1025VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
1026{
1027 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
1028 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1029}
1030
1031
1032VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
1033{
1034 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1035 return pVCpu->cpum.s.Guest.eip;
1036}
1037
1038
1039VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
1040{
1041 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1042 return pVCpu->cpum.s.Guest.rip;
1043}
1044
1045
1046VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
1047{
1048 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
1049 return pVCpu->cpum.s.Guest.eax;
1050}
1051
1052
1053VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
1054{
1055 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
1056 return pVCpu->cpum.s.Guest.ebx;
1057}
1058
1059
1060VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
1061{
1062 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
1063 return pVCpu->cpum.s.Guest.ecx;
1064}
1065
1066
1067VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
1068{
1069 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
1070 return pVCpu->cpum.s.Guest.edx;
1071}
1072
1073
1074VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
1075{
1076 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
1077 return pVCpu->cpum.s.Guest.esi;
1078}
1079
1080
1081VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
1082{
1083 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
1084 return pVCpu->cpum.s.Guest.edi;
1085}
1086
1087
1088VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
1089{
1090 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
1091 return pVCpu->cpum.s.Guest.esp;
1092}
1093
1094
1095VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
1096{
1097 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
1098 return pVCpu->cpum.s.Guest.ebp;
1099}
1100
1101
1102VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
1103{
1104 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1105 return pVCpu->cpum.s.Guest.eflags.u32;
1106}
1107
1108
1109VMMDECL(int) CPUMGetGuestCRx(PCVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1110{
1111 switch (iReg)
1112 {
1113 case DISCREG_CR0:
1114 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1115 *pValue = pVCpu->cpum.s.Guest.cr0;
1116 break;
1117
1118 case DISCREG_CR2:
1119 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
1120 *pValue = pVCpu->cpum.s.Guest.cr2;
1121 break;
1122
1123 case DISCREG_CR3:
1124 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
1125 *pValue = pVCpu->cpum.s.Guest.cr3;
1126 break;
1127
1128 case DISCREG_CR4:
1129 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1130 *pValue = pVCpu->cpum.s.Guest.cr4;
1131 break;
1132
1133 case DISCREG_CR8:
1134 {
1135 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1136 uint8_t u8Tpr;
1137 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1138 if (RT_FAILURE(rc))
1139 {
1140 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1141 *pValue = 0;
1142 return rc;
1143 }
1144 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes into CR8; bits 3-0 are not represented in CR8. */
1145 break;
1146 }
1147
1148 default:
1149 return VERR_INVALID_PARAMETER;
1150 }
1151 return VINF_SUCCESS;
1152}
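
/**
 * Note on the DISCREG_CR8 case above (illustrative arithmetic only): CR8
 * holds the task-priority class, i.e. bits 7-4 of the APIC TPR.
 *
 * @code
 *     // u8Tpr == 0xA0  =>  *pValue == 0xA0 >> 4 == 0xA
 * @endcode
 */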
1153
1154
1155VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
1156{
1157 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1158 return pVCpu->cpum.s.Guest.dr[0];
1159}
1160
1161
1162VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
1163{
1164 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1165 return pVCpu->cpum.s.Guest.dr[1];
1166}
1167
1168
1169VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
1170{
1171 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1172 return pVCpu->cpum.s.Guest.dr[2];
1173}
1174
1175
1176VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
1177{
1178 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1179 return pVCpu->cpum.s.Guest.dr[3];
1180}
1181
1182
1183VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
1184{
1185 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
1186 return pVCpu->cpum.s.Guest.dr[6];
1187}
1188
1189
1190VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
1191{
1192 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
1193 return pVCpu->cpum.s.Guest.dr[7];
1194}
1195
1196
1197VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1198{
1199 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
1200 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1201 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1202 if (iReg == 4 || iReg == 5)
1203 iReg += 2;
1204 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1205 return VINF_SUCCESS;
1206}
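
/**
 * Example for the DR4/DR5 aliasing above (illustrative only): the function
 * unconditionally treats DR4 as DR6 and DR5 as DR7, matching the
 * architectural aliasing that applies when CR4.DE is clear.
 *
 * @code
 *     uint64_t uDr7 = 0;
 *     int rc = CPUMGetGuestDRx(pVCpu, 5, &uDr7); // DR5 aliases DR7
 *     // rc == VINF_SUCCESS and uDr7 == pVCpu->cpum.s.Guest.dr[7]
 * @endcode
 */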
1207
1208
1209VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
1210{
1211 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1212 return pVCpu->cpum.s.Guest.msrEFER;
1213}
1214
1215
1216/**
1217 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1218 *
1219 * @returns Pointer to the leaf if found, NULL if not.
1220 *
1221 * @param pVM The cross context VM structure.
1222 * @param uLeaf The leaf to get.
1223 */
1224PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1225{
1226 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1227 if (iEnd)
1228 {
1229 unsigned iStart = 0;
1230 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1231 for (;;)
1232 {
1233 unsigned i = iStart + (iEnd - iStart) / 2U;
1234 if (uLeaf < paLeaves[i].uLeaf)
1235 {
1236 if (i <= iStart)
1237 return NULL;
1238 iEnd = i;
1239 }
1240 else if (uLeaf > paLeaves[i].uLeaf)
1241 {
1242 i += 1;
1243 if (i >= iEnd)
1244 return NULL;
1245 iStart = i;
1246 }
1247 else
1248 {
1249 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1250 return &paLeaves[i];
1251
1252 /* This shouldn't normally happen. But in case it does due
1253 to user configuration overrides or something, just return the
1254 first sub-leaf. */
1255 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1256 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1257 while ( paLeaves[i].uSubLeaf != 0
1258 && i > 0
1259 && uLeaf == paLeaves[i - 1].uLeaf)
1260 i--;
1261 return &paLeaves[i];
1262 }
1263 }
1264 }
1265
1266 return NULL;
1267}
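
/**
 * Usage sketch (illustrative only): the leaf array is kept sorted by uLeaf,
 * which is what makes the binary search above valid; callers simply ask for
 * a leaf and check for NULL, as CPUMSetGuestCpuIdPerCpuApicFeature does
 * further down.
 *
 * @code
 *     PCPUMCPUIDLEAF pStdLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
 *     if (pStdLeaf)
 *     {
 *         // pStdLeaf->uEax/uEbx/uEcx/uEdx hold the standard feature leaf.
 *     }
 * @endcode
 */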
1268
1269
1270/**
1271 * Looks up a CPUID leaf in the CPUID leaf array.
1272 *
1273 * @returns Pointer to the leaf if found, NULL if not.
1274 *
1275 * @param pVM The cross context VM structure.
1276 * @param uLeaf The leaf to get.
1277 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1278 * isn't.
1279 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1280 */
1281PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1282{
1283 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1284 if (iEnd)
1285 {
1286 unsigned iStart = 0;
1287 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1288 for (;;)
1289 {
1290 unsigned i = iStart + (iEnd - iStart) / 2U;
1291 if (uLeaf < paLeaves[i].uLeaf)
1292 {
1293 if (i <= iStart)
1294 return NULL;
1295 iEnd = i;
1296 }
1297 else if (uLeaf > paLeaves[i].uLeaf)
1298 {
1299 i += 1;
1300 if (i >= iEnd)
1301 return NULL;
1302 iStart = i;
1303 }
1304 else
1305 {
1306 uSubLeaf &= paLeaves[i].fSubLeafMask;
1307 if (uSubLeaf == paLeaves[i].uSubLeaf)
1308 *pfExactSubLeafHit = true;
1309 else
1310 {
1311 /* Find the right subleaf. We return the last one before
1312 uSubLeaf if we don't find an exact match. */
1313 if (uSubLeaf < paLeaves[i].uSubLeaf)
1314 while ( i > 0
1315 && uLeaf == paLeaves[i - 1].uLeaf
1316 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1317 i--;
1318 else
1319 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1320 && uLeaf == paLeaves[i + 1].uLeaf
1321 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1322 i++;
1323 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1324 }
1325 return &paLeaves[i];
1326 }
1327 }
1328 }
1329
1330 *pfExactSubLeafHit = false;
1331 return NULL;
1332}
1333
1334
1335/**
1336 * Gets a CPUID leaf.
1337 *
1338 * @param pVCpu The cross context virtual CPU structure.
1339 * @param uLeaf The CPUID leaf to get.
1340 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1341 * @param pEax Where to store the EAX value.
1342 * @param pEbx Where to store the EBX value.
1343 * @param pEcx Where to store the ECX value.
1344 * @param pEdx Where to store the EDX value.
1345 */
1346VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1347 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1348{
1349 bool fExactSubLeafHit;
1350 PVM pVM = pVCpu->CTX_SUFF(pVM);
1351 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1352 if (pLeaf)
1353 {
1354 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1355 if (fExactSubLeafHit)
1356 {
1357 *pEax = pLeaf->uEax;
1358 *pEbx = pLeaf->uEbx;
1359 *pEcx = pLeaf->uEcx;
1360 *pEdx = pLeaf->uEdx;
1361
1362 /*
1363 * Deal with CPU specific information.
1364 */
1365 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
1366 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
1367 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
1368 {
1369 if (uLeaf == 1)
1370 {
1371 /* EBX: Bits 31-24: Initial APIC ID. */
1372 Assert(pVCpu->idCpu <= 255);
1373 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1374 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1375
1376 /* EDX: Bit 9: AND with APICBASE.EN. */
1377 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1378 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1379
1380 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1381 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1382 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1383 }
1384 else if (uLeaf == 0xb)
1385 {
1386 /* EDX: Initial extended APIC ID. */
1387 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1388 *pEdx = pVCpu->idCpu;
1389 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
1390 }
1391 else if (uLeaf == UINT32_C(0x8000001e))
1392 {
1393 /* EAX: Initial extended APIC ID. */
1394 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1395 *pEax = pVCpu->idCpu;
1396 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
1397 }
1398 else if (uLeaf == UINT32_C(0x80000001))
1399 {
1400 /* EDX: Bit 9: AND with APICBASE.EN. */
1401 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
1402 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1403 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
1404 }
1405 else
1406 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1407 }
1408 }
1409 /*
1410 * Out of range sub-leaves aren't quite as easy and pretty to emulate as
1411 * the in-range ones, but we do the best we can here...
1412 */
1413 else
1414 {
1415 *pEax = *pEbx = *pEcx = *pEdx = 0;
1416 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1417 {
1418 *pEcx = uSubLeaf & 0xff;
1419 *pEdx = pVCpu->idCpu;
1420 }
1421 }
1422 }
1423 else
1424 {
1425 /*
1426 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1427 */
1428 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1429 {
1430 default:
1431 AssertFailed();
1432 RT_FALL_THRU();
1433 case CPUMUNKNOWNCPUID_DEFAULTS:
1434 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1435 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1436 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1437 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1438 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1439 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1440 break;
1441 case CPUMUNKNOWNCPUID_PASSTHRU:
1442 *pEax = uLeaf;
1443 *pEbx = 0;
1444 *pEcx = uSubLeaf;
1445 *pEdx = 0;
1446 break;
1447 }
1448 }
1449 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1450}
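
/**
 * Usage sketch (illustrative only, hypothetical caller): querying leaf 1 and
 * extracting the initial APIC ID that the code above patches into EBX[31:24].
 *
 * @code
 *     uint32_t uEax, uEbx, uEcx, uEdx;
 *     CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx); // leaf 1, sub-leaf 0
 *     uint8_t const idApic = (uint8_t)(uEbx >> 24);               // equals pVCpu->idCpu
 * @endcode
 */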
1451
1452
1453/**
1454 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1455 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1456 *
1457 * @returns Previous value.
1458 * @param pVCpu The cross context virtual CPU structure to make the
1459 * change on. Usually the calling EMT.
1460 * @param fVisible Whether to make it visible (true) or hide it (false).
1461 *
1462 * @remarks This is "VMMDECL" so that it still links with
1463 * the old APIC code which is in VBoxDD2 and not in
1464 * the VMM module.
1465 */
1466VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1467{
1468 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1469 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1470
1471#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1472 /*
1473 * Patch manager saved state legacy pain.
1474 */
1475 PVM pVM = pVCpu->CTX_SUFF(pVM);
1476 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1477 if (pLeaf)
1478 {
1479 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1480 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx;
1481 else
1482 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx & ~X86_CPUID_FEATURE_EDX_APIC;
1483 }
1484
1485 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1486 if (pLeaf)
1487 {
1488 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1489 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx;
1490 else
1491 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx & ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1492 }
1493#endif
1494
1495 return fOld;
1496}
1497
1498
1499/**
1500 * Gets the host CPU vendor.
1501 *
1502 * @returns CPU vendor.
1503 * @param pVM The cross context VM structure.
1504 */
1505VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1506{
1507 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1508}
1509
1510
1511/**
1512 * Gets the CPU vendor.
1513 *
1514 * @returns CPU vendor.
1515 * @param pVM The cross context VM structure.
1516 */
1517VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1518{
1519 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1520}
1521
1522
1523VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1524{
1525 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1526 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1527}
1528
1529
1530VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1531{
1532 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1533 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1534}
1535
1536
1537VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1538{
1539 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1540 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1541}
1542
1543
1544VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1545{
1546 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1547 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1548}
1549
1550
1551VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1552{
1553 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1554 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
1555 return VINF_SUCCESS; /* No need to recalc. */
1556}
1557
1558
1559VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1560{
1561 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1562 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
1563 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1564}
1565
1566
1567VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1568{
1569 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1570 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1571 if (iReg == 4 || iReg == 5)
1572 iReg += 2;
1573 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1574 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1575}
1576
1577
1578/**
1579 * Recalculates the hypervisor DRx register values based on current guest
1580 * registers and DBGF breakpoints, updating changed registers depending on the
1581 * context.
1582 *
1583 * This is called whenever a guest DRx register is modified (any context) and
1584 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1585 *
1586 * In raw-mode context this function will reload any (hyper) DRx registers that
1587 * come out with a different value. It may also have to save the host debug
1588 * registers if that hasn't been done already. In this context though, we'll
1589 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1590 * are only important when breakpoints are actually enabled.
1591 *
1592 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1593 * reloaded by the HM code if it changes. Furthermore, we will only use the
1594 * combined register set when the VBox debugger is actually using hardware BPs;
1595 * when it isn't, we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1596 * concern us here).
1597 *
1598 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1599 * all the time.
1600 *
1601 * @returns VINF_SUCCESS.
1602 * @param pVCpu The cross context virtual CPU structure.
1603 * @param iGstReg The guest debug register number that was modified.
1604 * UINT8_MAX if not guest register.
1605 * @param fForceHyper Used in HM to force hyper registers because of single
1606 * stepping.
1607 */
1608VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1609{
1610 PVM pVM = pVCpu->CTX_SUFF(pVM);
1611#ifndef IN_RING0
1612 RT_NOREF_PV(iGstReg);
1613#endif
1614
1615 /*
1616 * Compare the DR7s first.
1617 *
1618 * We only care about the enabled flags. GD is virtualized when we
1619 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1620 * always have the LE and GE bits set, so no need to check and disable
1621 * stuff if they're cleared like we have to for the guest DR7.
1622 */
1623 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1624 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1625 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1626 uGstDr7 = 0;
1627 else if (!(uGstDr7 & X86_DR7_LE))
1628 uGstDr7 &= ~X86_DR7_LE_ALL;
1629 else if (!(uGstDr7 & X86_DR7_GE))
1630 uGstDr7 &= ~X86_DR7_GE_ALL;
1631
1632 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1633
1634#ifdef IN_RING0
1635 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1636 fForceHyper = true;
1637#endif
1638 if ( (!VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7))
1639 & X86_DR7_ENABLED_MASK)
1640 {
1641 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1642#ifdef IN_RC
1643 bool const fRawModeEnabled = true;
1644#elif defined(IN_RING3)
1645 bool const fRawModeEnabled = VM_IS_RAW_MODE_ENABLED(pVM);
1646#endif
1647
1648 /*
1649 * Ok, something is enabled. Recalc each of the breakpoints, taking
1650 * the VM debugger ones over the guest ones. In raw-mode context we will
1651 * not allow breakpoints with values inside the hypervisor area.
1652 */
1653 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1654
1655 /* bp 0 */
1656 RTGCUINTREG uNewDr0;
1657 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1658 {
1659 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1660 uNewDr0 = DBGFBpGetDR0(pVM);
1661 }
1662 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1663 {
1664 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1665#ifndef IN_RING0
1666 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1667 uNewDr0 = 0;
1668 else
1669#endif
1670 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1671 }
1672 else
1673 uNewDr0 = 0;
1674
1675 /* bp 1 */
1676 RTGCUINTREG uNewDr1;
1677 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1678 {
1679 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1680 uNewDr1 = DBGFBpGetDR1(pVM);
1681 }
1682 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1683 {
1684 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1685#ifndef IN_RING0
1686 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1687 uNewDr1 = 0;
1688 else
1689#endif
1690 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1691 }
1692 else
1693 uNewDr1 = 0;
1694
1695 /* bp 2 */
1696 RTGCUINTREG uNewDr2;
1697 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1698 {
1699 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1700 uNewDr2 = DBGFBpGetDR2(pVM);
1701 }
1702 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1703 {
1704 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1705#ifndef IN_RING0
1706 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1707 uNewDr2 = 0;
1708 else
1709#endif
1710 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1711 }
1712 else
1713 uNewDr2 = 0;
1714
1715 /* bp 3 */
1716 RTGCUINTREG uNewDr3;
1717 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1718 {
1719 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1720 uNewDr3 = DBGFBpGetDR3(pVM);
1721 }
1722 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1723 {
1724 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1725#ifndef IN_RING0
1726 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1727 uNewDr3 = 0;
1728 else
1729#endif
1730 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1731 }
1732 else
1733 uNewDr3 = 0;
1734
1735 /*
1736 * Apply the updates.
1737 */
1738#ifdef IN_RC
1739 /* Make sure to save host registers first. */
1740 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1741 {
1742 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1743 {
1744 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1745 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1746 }
1747 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1748 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1749 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1750 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1751 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1752
1753 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1754 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1755 ASMSetDR0(uNewDr0);
1756 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
1757 ASMSetDR1(uNewDr1);
1758 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
1759 ASMSetDR2(uNewDr2);
1760 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
1761 ASMSetDR3(uNewDr3);
1762 ASMSetDR6(X86_DR6_INIT_VAL);
1763 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
1764 ASMSetDR7(uNewDr7);
1765 }
1766 else
1767#endif
1768 {
1769 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1770 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1771 CPUMSetHyperDR3(pVCpu, uNewDr3);
1772 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1773 CPUMSetHyperDR2(pVCpu, uNewDr2);
1774 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1775 CPUMSetHyperDR1(pVCpu, uNewDr1);
1776 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1777 CPUMSetHyperDR0(pVCpu, uNewDr0);
1778 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1779 CPUMSetHyperDR7(pVCpu, uNewDr7);
1780 }
1781 }
1782#ifdef IN_RING0
1783 else if (CPUMIsGuestDebugStateActive(pVCpu))
1784 {
1785 /*
1786 * Reload the register that was modified. Normally this won't happen
1787 * as we won't intercept DRx writes when not having the hyper debug
1788 * state loaded, but in case we do for some reason we'll simply deal
1789 * with it.
1790 */
1791 switch (iGstReg)
1792 {
1793 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1794 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1795 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1796 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1797 default:
1798 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1799 }
1800 }
1801#endif
1802 else
1803 {
1804 /*
1805 * No active debug state any more. In raw-mode this means we have to
1806 * make sure DR7 has everything disabled now, if we armed it already.
1807 * In ring-0 we might end up here when just single stepping.
1808 */
1809#if defined(IN_RC) || defined(IN_RING0)
1810 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1811 {
1812# ifdef IN_RC
1813 ASMSetDR7(X86_DR7_INIT_VAL);
1814# endif
1815 if (pVCpu->cpum.s.Hyper.dr[0])
1816 ASMSetDR0(0);
1817 if (pVCpu->cpum.s.Hyper.dr[1])
1818 ASMSetDR1(0);
1819 if (pVCpu->cpum.s.Hyper.dr[2])
1820 ASMSetDR2(0);
1821 if (pVCpu->cpum.s.Hyper.dr[3])
1822 ASMSetDR3(0);
1823 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1824 }
1825#endif
1826 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1827
1828 /* Clear all the registers. */
1829 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1830 pVCpu->cpum.s.Hyper.dr[3] = 0;
1831 pVCpu->cpum.s.Hyper.dr[2] = 0;
1832 pVCpu->cpum.s.Hyper.dr[1] = 0;
1833 pVCpu->cpum.s.Hyper.dr[0] = 0;
1834
1835 }
1836 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1837 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1838 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1839 pVCpu->cpum.s.Hyper.dr[7]));
1840
1841 return VINF_SUCCESS;
1842}
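
/**
 * Merge behaviour sketch for the function above (illustrative only): for
 * each breakpoint the DBGF (VM debugger) settings win over the guest ones,
 * and in raw-mode a guest breakpoint address inside the hypervisor area is
 * dropped.
 *
 * @code
 *     // DBGF L0/G0 enabled        =>  hyper DR0 = DBGFBpGetDR0(pVM)
 *     // only guest L0/G0 enabled  =>  hyper DR0 = CPUMGetGuestDR0(pVCpu)
 *     // neither enabled           =>  hyper DR0 = 0
 * @endcode
 */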
1843
1844
1845/**
1846 * Set the guest XCR0 register.
1847 *
1848 * Will load additional state if the FPU state is already loaded (in ring-0 &
1849 * raw-mode context).
1850 *
1851 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1852 * value.
1853 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1854 * @param uNewValue The new value.
1855 * @thread EMT(pVCpu)
1856 */
1857VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
1858{
1859 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
1860 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1861 /* The X87 bit cannot be cleared. */
1862 && (uNewValue & XSAVE_C_X87)
1863 /* AVX requires SSE. */
1864 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1865 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1866 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1867 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1868 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1869 )
1870 {
1871 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1872
1873 /* If more state components are enabled, we need to take care to load
1874 them if the FPU/SSE state is already loaded. May otherwise leak
1875 host state to the guest. */
1876 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1877 if (fNewComponents)
1878 {
1879#if defined(IN_RING0) || defined(IN_RC)
1880 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1881 {
1882 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1883 /* Adding more components. */
1884 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
1885 else
1886 {
1887 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1888 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1889 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1890 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1891 }
1892 }
1893#endif
1894 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1895 }
1896 return VINF_SUCCESS;
1897 }
1898 return VERR_CPUM_RAISE_GP_0;
1899}
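
/**
 * Examples of the XCR0 validity rules enforced above (illustrative only,
 * assuming the guest XSAVE mask permits the components in question):
 *
 * @code
 *     CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87);                             // VINF_SUCCESS
 *     CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM); // VINF_SUCCESS, AVX with SSE
 *     CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_YMM);               // VERR_CPUM_RAISE_GP_0, AVX without SSE
 *     CPUMSetGuestXcr0(pVCpu, 0);                                       // VERR_CPUM_RAISE_GP_0, X87 cannot be cleared
 * @endcode
 */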
1900
1901
1902/**
1903 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1904 *
1905 * @returns true if NXE is enabled, otherwise false.
1906 * @param pVCpu The cross context virtual CPU structure.
1907 */
1908VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
1909{
1910 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1911 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1912}
1913
1914
1915/**
1916 * Tests if the guest has the Page Size Extension enabled (PSE).
1917 *
1918 * @returns true if PSE (or PAE, which implies large pages) is enabled, otherwise false.
1919 * @param pVCpu The cross context virtual CPU structure.
1920 */
1921VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
1922{
1923 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1924 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1925 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1926}
1927
1928
1929/**
1930 * Tests if the guest has paging enabled (PG).
1931 *
1932 * @returns true if paging is enabled, otherwise false.
1933 * @param pVCpu The cross context virtual CPU structure.
1934 */
1935VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
1936{
1937 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1938 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1939}
1940
1941
1942/**
1943 * Tests if the guest has the paging enabled (PG).
1944 *
1945 * @returns true if in real mode, otherwise false.
1946 * @param pVCpu The cross context virtual CPU structure.
1947 */
1948VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
1949{
1950 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1951 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1952}
1953
1954
1955/**
1956 * Tests if the guest is running in real mode or not.
1957 *
1958 * @returns true if in real mode, otherwise false.
1959 * @param pVCpu The cross context virtual CPU structure.
1960 */
1961VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
1962{
1963 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1964 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1965}
1966
1967
1968/**
1969 * Tests if the guest is running in real or virtual 8086 mode.
1970 *
1971 * @returns @c true if it is, @c false if not.
1972 * @param pVCpu The cross context virtual CPU structure.
1973 */
1974VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
1975{
1976 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
1977 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1978 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1979}
1980
1981
1982/**
1983 * Tests if the guest is running in protected mode or not.
1984 *
1985 * @returns true if in protected mode, otherwise false.
1986 * @param pVCpu The cross context virtual CPU structure.
1987 */
1988VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
1989{
1990 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1991 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1992}
1993
1994
1995/**
1996 * Tests if the guest is running in paged protected mode or not.
1997 *
1998 * @returns true if in paged protected mode, otherwise false.
1999 * @param pVCpu The cross context virtual CPU structure.
2000 */
2001VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
2002{
2003 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
2004 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2005}
2006
2007
2008/**
2009 * Tests if the guest is running in long mode or not.
2010 *
2011 * @returns true if in long mode, otherwise false.
2012 * @param pVCpu The cross context virtual CPU structure.
2013 */
2014VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
2015{
2016 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
2017 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2018}
2019
2020
2021/**
2022 * Tests if the guest is running in PAE mode or not.
2023 *
2024 * @returns true if in PAE mode, otherwise false.
2025 * @param pVCpu The cross context virtual CPU structure.
2026 */
2027VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
2028{
2029 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
2030 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
2031 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
2032 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2033 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2034 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2035}
2036
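/*
 * A tiny sketch of the PAE-mode predicate above, reduced to the three register
 * bits it depends on; the helper name and flag parameters are made up.  PAE
 * paging is active when CR4.PAE and CR0.PG are set while EFER.LMA is clear
 * (long mode uses its own 4-level paging instead).
 */
#if 0 /* illustration only */
# include <stdbool.h>

static bool exampleIsPaePagingActive(bool fCr4Pae, bool fCr0Pg, bool fEferLma)
{
    return fCr4Pae && fCr0Pg && !fEferLma;
}
#endif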
2037
2038/**
2039 * Tests if the guest is running in 64-bit mode or not.
2040 *
2041 * @returns true if in 64-bit protected mode, otherwise false.
2042 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2043 */
2044VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2045{
2046 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
2047 if (!CPUMIsGuestInLongMode(pVCpu))
2048 return false;
2049 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2050 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2051}
2052
2053
2054/**
2055 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2056 * registers.
2057 *
2058 * @returns true if in 64-bit protected mode, otherwise false.
2059 * @param pCtx Pointer to the current guest CPU context.
2060 */
2061VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2062{
2063 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2064}
2065
2066#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2067
2068/**
2069 * Tests whether we have entered raw-mode.
2070 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2071 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2072 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2073 */
2074VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PCVMCPU pVCpu)
2075{
2076 return pVCpu->cpum.s.fRawEntered;
2077}
2078
2079/**
2080 * Transforms the guest CPU state to raw-ring mode.
2081 *
2082 * This function will change any of the CS and SS registers with RPL=0 to RPL=1.
2083 *
2084 * @returns VBox status code. (recompiler failure)
2085 * @param pVCpu The cross context virtual CPU structure.
2086 * @see @ref pg_raw
2087 */
2088VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2089{
2090 PVM pVM = pVCpu->CTX_SUFF(pVM);
2091
2092 Assert(!pVCpu->cpum.s.fRawEntered);
2093 Assert(!pVCpu->cpum.s.fRemEntered);
2094 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2095
2096 /*
2097 * Are we in Ring-0?
2098 */
2099 if ( pCtx->ss.Sel
2100 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2101 && !pCtx->eflags.Bits.u1VM)
2102 {
2103 /*
2104 * Enter execution mode.
2105 */
2106 PATMRawEnter(pVM, pCtx);
2107
2108 /*
2109 * Set CPL to Ring-1.
2110 */
2111 pCtx->ss.Sel |= 1;
2112 if ( pCtx->cs.Sel
2113 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2114 pCtx->cs.Sel |= 1;
2115 }
2116 else
2117 {
2118# ifdef VBOX_WITH_RAW_RING1
2119 if ( EMIsRawRing1Enabled(pVM)
2120 && !pCtx->eflags.Bits.u1VM
2121 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2122 {
2123 /* Set CPL to Ring-2. */
2124 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2125 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2126 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2127 }
2128# else
2129 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2130 ("ring-1 code not supported\n"));
2131# endif
2132 /*
2133 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2134 */
2135 PATMRawEnter(pVM, pCtx);
2136 }
2137
2138 /*
2139 * Assert sanity.
2140 */
2141 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2142 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2143 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2144 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE));
2145
2146 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2147
2148 pVCpu->cpum.s.fRawEntered = true;
2149 return VINF_SUCCESS;
2150}
2151
2152
2153/**
2154 * Transforms the guest CPU state from raw-ring mode to correct values.
2155 *
2156 * This function will change any selector registers with DPL=1 to DPL=0.
2157 *
2158 * @returns Adjusted rc.
2159 * @param pVCpu The cross context virtual CPU structure.
2160 * @param rc Raw mode return code
2161 * @see @ref pg_raw
2162 */
2163VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2164{
2165 PVM pVM = pVCpu->CTX_SUFF(pVM);
2166
2167 /*
2168 * Don't leave if we've already left (in RC).
2169 */
2170 Assert(!pVCpu->cpum.s.fRemEntered);
2171 if (!pVCpu->cpum.s.fRawEntered)
2172 return rc;
2173 pVCpu->cpum.s.fRawEntered = false;
2174
2175 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2176 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2177 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2178 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2179
2180 /*
2181 * Are we executing in raw ring-1?
2182 */
2183 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2184 && !pCtx->eflags.Bits.u1VM)
2185 {
2186 /*
2187 * Leave execution mode.
2188 */
2189 PATMRawLeave(pVM, pCtx, rc);
2190 /* Not quite sure if this is really required, but shouldn't harm (too much anyway). */
2191 /** @todo See what happens if we remove this. */
2192 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2193 pCtx->ds.Sel &= ~X86_SEL_RPL;
2194 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2195 pCtx->es.Sel &= ~X86_SEL_RPL;
2196 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2197 pCtx->fs.Sel &= ~X86_SEL_RPL;
2198 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2199 pCtx->gs.Sel &= ~X86_SEL_RPL;
2200
2201 /*
2202 * Ring-1 selector => Ring-0.
2203 */
2204 pCtx->ss.Sel &= ~X86_SEL_RPL;
2205 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2206 pCtx->cs.Sel &= ~X86_SEL_RPL;
2207 }
2208 else
2209 {
2210 /*
2211 * PATM is taking care of the IOPL and IF flags for us.
2212 */
2213 PATMRawLeave(pVM, pCtx, rc);
2214 if (!pCtx->eflags.Bits.u1VM)
2215 {
2216# ifdef VBOX_WITH_RAW_RING1
2217 if ( EMIsRawRing1Enabled(pVM)
2218 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2219 {
2220 /* Not quite sure if this is really required, but shouldn't harm (too much anyway). */
2221 /** @todo See what happens if we remove this. */
2222 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2223 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2224 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2225 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2226 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2227 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2228 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2229 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2230
2231 /*
2232 * Ring-2 selector => Ring-1.
2233 */
2234 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2235 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2236 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2237 }
2238 else
2239 {
2240# endif
2241 /** @todo See what happens if we remove this. */
2242 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2243 pCtx->ds.Sel &= ~X86_SEL_RPL;
2244 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2245 pCtx->es.Sel &= ~X86_SEL_RPL;
2246 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2247 pCtx->fs.Sel &= ~X86_SEL_RPL;
2248 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2249 pCtx->gs.Sel &= ~X86_SEL_RPL;
2250# ifdef VBOX_WITH_RAW_RING1
2251 }
2252# endif
2253 }
2254 }
2255
2256 return rc;
2257}
2258
2259#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2260
2261/**
2262 * Updates the EFLAGS while we're in raw-mode.
2263 *
2264 * @param pVCpu The cross context virtual CPU structure.
2265 * @param fEfl The new EFLAGS value.
2266 */
2267VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2268{
2269#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2270 if (pVCpu->cpum.s.fRawEntered)
2271 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2272 else
2273#endif
2274 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2275}
2276
2277
2278/**
2279 * Gets the EFLAGS while we're in raw-mode.
2280 *
2281 * @returns The eflags.
2282 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2283 */
2284VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2285{
2286#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2287 if (pVCpu->cpum.s.fRawEntered)
2288 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2289#endif
2290 return pVCpu->cpum.s.Guest.eflags.u32;
2291}
2292
2293
2294/**
2295 * Sets the specified changed flags (CPUM_CHANGED_*).
2296 *
2297 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2298 * @param fChangedAdd The changed flags to add.
2299 */
2300VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2301{
2302 pVCpu->cpum.s.fChanged |= fChangedAdd;
2303}
2304
2305
2306/**
2307 * Checks if the CPU supports the XSAVE and XRSTOR instructions.
2308 *
2309 * @returns true if supported.
2310 * @returns false if not supported.
2311 * @param pVM The cross context VM structure.
2312 */
2313VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2314{
2315 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2316}
2317
2318
2319/**
2320 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2321 * @returns true if used.
2322 * @returns false if not used.
2323 * @param pVM The cross context VM structure.
2324 */
2325VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2326{
2327 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2328}
2329
2330
2331/**
2332 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2333 * @returns true if used.
2334 * @returns false if not used.
2335 * @param pVM The cross context VM structure.
2336 */
2337VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2338{
2339 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2340}
2341
2342#ifdef IN_RC
2343
2344/**
2345 * Lazily sync in the FPU/XMM state.
2346 *
2347 * @returns VBox status code.
2348 * @param pVCpu The cross context virtual CPU structure.
2349 */
2350VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2351{
2352 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2353}
2354
2355#endif /* IN_RC */
2356
2357/**
2358 * Checks if we activated the FPU/XMM state of the guest OS.
2359 *
2360 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
2361 * time we'll be executing guest code, so it may return true for 64-on-32 when
2362 * we still haven't actually loaded the FPU state, just scheduled it to be
2363 * loaded the next time we go through the world switcher (CPUM_SYNC_FPU_STATE).
2364 *
2365 * @returns true / false.
2366 * @param pVCpu The cross context virtual CPU structure.
2367 */
2368VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2369{
2370 return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
2371}
2372
2373
2374/**
2375 * Checks if we've really loaded the FPU/XMM state of the guest OS.
2376 *
2377 * @returns true / false.
2378 * @param pVCpu The cross context virtual CPU structure.
2379 */
2380VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
2381{
2382 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
2383}
2384
2385
2386/**
2387 * Checks if we saved the FPU/XMM state of the host OS.
2388 *
2389 * @returns true / false.
2390 * @param pVCpu The cross context virtual CPU structure.
2391 */
2392VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
2393{
2394 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
2395}
2396
2397
2398/**
2399 * Checks if the guest debug state is active.
2400 *
2401 * @returns boolean
2402 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2403 */
2404VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2405{
2406 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2407}
2408
2409
2410/**
2411 * Checks if the guest debug state is to be made active during the world-switch
2412 * (currently only used for the 32->64 switcher case).
2413 *
2414 * @returns boolean
2415 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2416 */
2417VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2418{
2419 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2420}
2421
2422
2423/**
2424 * Checks if the hyper debug state is active.
2425 *
2426 * @returns boolean
2427 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2428 */
2429VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2430{
2431 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2432}
2433
2434
2435/**
2436 * Checks if the hyper debug state is to be made active during the world-switch
2437 * (currently only used for the 32->64 switcher case).
2438 *
2439 * @returns boolean
2440 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2441 */
2442VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2443{
2444 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2445}
2446
2447
2448/**
2449 * Mark the guest's debug state as inactive.
2450 *
2451 * @returns boolean
2452 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2453 * @todo This API doesn't make sense any more.
2454 */
2455VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2456{
2457 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2458 NOREF(pVCpu);
2459}
2460
2461
2462/**
2463 * Get the current privilege level of the guest.
2464 *
2465 * @returns CPL
2466 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2467 */
2468VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2469{
2470 /*
2471 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2472 *
2473 * Note! We used to check CS.DPL here, assuming it was always equal to
2474 * CPL even if a conforming segment was loaded. But this turned out to
2475 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2476 * during install after a far call to ring 2 with VT-x. Then on newer
2477 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2478 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2479 *
2480 * So, forget CS.DPL, always use SS.DPL.
2481 *
2482 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2483 * isn't necessarily equal if the segment is conforming.
2484 * See section 4.11.1 in the AMD manual.
2485 *
2486 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2487 * right after a real->prot mode switch and when in V8086 mode? That
2488 * section says the RPL specified in a direct transfer (call, jmp,
2489 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2490 * it would be impossible for an exception handler or the iret
2491 * instruction to figure out whether SS:ESP are part of the frame
2492 * or not. A VBox or qemu bug must've led to this misconception.
2493 *
2494 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2495 * selector into SS with an RPL other than the CPL when CPL != 3 and
2496 * we're in 64-bit mode. The Intel dev box doesn't allow this; it
2497 * insists on RPL = CPL. Weird.
2498 */
2499 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
2500 uint32_t uCpl;
2501 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2502 {
2503 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2504 {
2505 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2506 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2507 else
2508 {
2509 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2510#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2511# ifdef VBOX_WITH_RAW_RING1
2512 if (pVCpu->cpum.s.fRawEntered)
2513 {
2514 if ( uCpl == 2
2515 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2516 uCpl = 1;
2517 else if (uCpl == 1)
2518 uCpl = 0;
2519 }
2520 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2521# else
2522 if (uCpl == 1)
2523 uCpl = 0;
2524# endif
2525#endif
2526 }
2527 }
2528 else
2529 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2530 }
2531 else
2532 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2533 return uCpl;
2534}
2535
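/*
 * A condensed sketch of the CPL decision above (hypothetical helper, plain
 * parameters): real mode reports CPL 0, virtual-8086 mode is always CPL 3,
 * and otherwise SS.DPL is the authoritative value.  The raw-mode RPL
 * adjustments are left out here.
 */
#if 0 /* illustration only */
# include <stdint.h>
# include <stdbool.h>

static uint32_t exampleGetCpl(bool fProtectedMode, bool fV86Mode, uint8_t uSsDpl)
{
    if (!fProtectedMode)
        return 0;               /* Real mode: treated as CPL 0. */
    if (fV86Mode)
        return 3;               /* Virtual-8086 mode: always CPL 3. */
    return uSsDpl;              /* Otherwise CPL equals SS.DPL. */
}
#endif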
2536
2537/**
2538 * Gets the current guest CPU mode.
2539 *
2540 * If paging mode is what you need, check out PGMGetGuestMode().
2541 *
2542 * @returns The CPU mode.
2543 * @param pVCpu The cross context virtual CPU structure.
2544 */
2545VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2546{
2547 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
2548 CPUMMODE enmMode;
2549 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2550 enmMode = CPUMMODE_REAL;
2551 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2552 enmMode = CPUMMODE_PROTECTED;
2553 else
2554 enmMode = CPUMMODE_LONG;
2555
2556 return enmMode;
2557}
2558
2559
2560/**
2561 * Figures out whether the CPU is currently executing 16-, 32- or 64-bit code.
2562 *
2563 * @returns 16, 32 or 64.
2564 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2565 */
2566VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2567{
2568 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
2569
2570 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2571 return 16;
2572
2573 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2574 {
2575 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2576 return 16;
2577 }
2578
2579 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2580 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2581 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2582 return 64;
2583
2584 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2585 return 32;
2586
2587 return 16;
2588}
2589
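/*
 * A small sketch of the mode decision above, reduced to a pure function with a
 * hypothetical name and plain flag parameters: real and V86 mode are 16-bit,
 * a long-mode code segment with CS.L set is 64-bit, and otherwise CS.D picks
 * between 32-bit and 16-bit code.
 */
#if 0 /* illustration only */
# include <stdint.h>
# include <stdbool.h>

static uint32_t exampleGetCodeBits(bool fProtectedMode, bool fV86Mode,
                                   bool fEferLma, bool fCsLong, bool fCsDefBig)
{
    if (!fProtectedMode || fV86Mode)   /* Real and V86 mode are 16-bit. */
        return 16;
    if (fCsLong && fEferLma)           /* 64-bit code segment in long mode. */
        return 64;
    return fCsDefBig ? 32 : 16;        /* Otherwise CS.D selects 32 vs 16 bit. */
}
#endif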
2590
2591VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2592{
2593 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
2594
2595 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2596 return DISCPUMODE_16BIT;
2597
2598 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2599 {
2600 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2601 return DISCPUMODE_16BIT;
2602 }
2603
2604 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2605 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2606 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2607 return DISCPUMODE_64BIT;
2608
2609 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2610 return DISCPUMODE_32BIT;
2611
2612 return DISCPUMODE_16BIT;
2613}
2614
2615
2616/**
2617 * Gets the guest MXCSR_MASK value.
2618 *
2619 * This does not access the x87 state, but the value we determined at VM
2620 * initialization.
2621 *
2622 * @returns MXCSR mask.
2623 * @param pVM The cross context VM structure.
2624 */
2625VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
2626{
2627 return pVM->cpum.s.GuestInfo.fMxCsrMask;
2628}
2629
2630
2631/**
2632 * Returns whether the guest has physical interrupts enabled.
2633 *
2634 * @returns @c true if interrupts are enabled, @c false otherwise.
2635 * @param pVCpu The cross context virtual CPU structure.
2636 *
2637 * @remarks Warning! This function does -not- take into account the global-interrupt
2638 * flag (GIF).
2639 */
2640VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
2641{
2642 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
2643 {
2644#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2645 uint32_t const fEFlags = !pVCpu->cpum.s.fRawEntered ? pVCpu->cpum.s.Guest.eflags.u : CPUMRawGetEFlags(pVCpu);
2646#else
2647 uint32_t const fEFlags = pVCpu->cpum.s.Guest.eflags.u;
2648#endif
2649 return RT_BOOL(fEFlags & X86_EFL_IF);
2650 }
2651
2652 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
2653 return CPUMIsGuestVmxPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2654
2655 Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
2656 return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2657}
2658
2659
2660/**
2661 * Returns whether the nested-guest has virtual interrupts enabled.
2662 *
2663 * @returns @c true if interrupts are enabled, @c false otherwise.
2664 * @param pVCpu The cross context virtual CPU structure.
2665 *
2666 * @remarks Warning! This function does -not- take into account the global-interrupt
2667 * flag (GIF).
2668 */
2669VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
2670{
2671 Assert(CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest));
2672
2673 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
2674 return CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2675
2676 Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
2677 return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2678}
2679
2680
2681/**
2682 * Calculates the interruptibility of the guest.
2683 *
2684 * @returns Interruptibility level.
2685 * @param pVCpu The cross context virtual CPU structure.
2686 */
2687VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
2688{
2689#if 1
2690 /* Global-interrupt flag blocks pretty much everything we care about here. */
2691 if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
2692 {
2693 /*
2694 * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
2695 * it directly here. If and how EFLAGS are used depends on the context (nested-guest
2696 * or raw-mode). Hence we use the function below which handles the details.
2697 */
2698 if ( CPUMIsGuestPhysIntrEnabled(pVCpu)
2699 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
2700 {
2701 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
2702 || CPUMIsGuestVirtIntrEnabled(pVCpu))
2703 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
2704
2705 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
2706 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
2707 }
2708
2709 /*
2710 * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
2711 * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
2712 * However, there is some uncertainty regarding the converse, i.e. whether
2713 * NMI-blocking until IRET blocks delivery of physical interrupts.
2714 *
2715 * See Intel spec. 25.4.1 "Event Blocking".
2716 */
2717 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2718 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2719
2720 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2721 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
2722
2723 return CPUMINTERRUPTIBILITY_INT_DISABLED;
2724 }
2725 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2726#else
2727 if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
2728 {
2729 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
2730 {
2731 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
2732 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
2733
2734 /** @todo does blocking NMIs mean interrupts are also inhibited? */
2735 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2736 {
2737 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2738 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
2739 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2740 }
2741 AssertFailed();
2742 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2743 }
2744 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2745 }
2746 else
2747 {
2748 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
2749 {
2750 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2751 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2752 return CPUMINTERRUPTIBILITY_INT_DISABLED;
2753 }
2754 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2755 }
2756#endif
2757}
2758
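/*
 * A simplified sketch of the priority order applied above, reduced to plain
 * flags; the enum and function are hypothetical, and the real code
 * additionally distinguishes nested-guest virtual interrupts.
 */
#if 0 /* illustration only */
# include <stdbool.h>

typedef enum EXAMPLEINTERRUPTIBILITY
{
    EXAMPLE_UNRESTRAINED = 0,   /* Interrupts and NMIs can be delivered. */
    EXAMPLE_INT_INHIBITED,      /* Blocked by an interrupt shadow (MOV SS/STI). */
    EXAMPLE_INT_DISABLED,       /* Blocked by EFLAGS.IF being clear. */
    EXAMPLE_NMI_INHIBIT,        /* NMI delivery is blocked until the next IRET. */
    EXAMPLE_GLOBAL_INHIBIT      /* GIF is clear: everything is blocked. */
} EXAMPLEINTERRUPTIBILITY;

static EXAMPLEINTERRUPTIBILITY exampleGetInterruptibility(bool fGif, bool fIntEnabled,
                                                          bool fBlockNmis, bool fIntShadow)
{
    if (!fGif)
        return EXAMPLE_GLOBAL_INHIBIT;
    if (fIntEnabled && !fBlockNmis && !fIntShadow)
        return EXAMPLE_UNRESTRAINED;
    if (fBlockNmis)
        return EXAMPLE_NMI_INHIBIT;
    if (fIntShadow)
        return EXAMPLE_INT_INHIBITED;
    return EXAMPLE_INT_DISABLED;
}
#endif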
2759
2760/**
2761 * Gets whether the guest (or nested-guest) is currently blocking delivery of NMIs.
2762 *
2763 * @returns @c true if NMIs are blocked, @c false otherwise.
2764 * @param pVCpu The cross context virtual CPU structure.
2765 */
2766VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu)
2767{
2768#ifndef IN_RC
2769 /*
2770 * Return the state of guest-NMI blocking in any of the following cases:
2771 * - We're not executing a nested-guest.
2772 * - We're executing an SVM nested-guest[1].
2773 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2774 *
2775 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2776 * SVM hypervisors must track NMI blocking themselves by intercepting
2777 * the IRET instruction after injection of an NMI.
2778 */
2779 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2780 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2781 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2782 || !CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI))
2783 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2784
2785 /*
2786 * Return the state of virtual-NMI blocking, if we are executing a
2787 * VMX nested-guest with virtual-NMIs enabled.
2788 */
2789 return CPUMIsGuestVmxVirtNmiBlocking(pVCpu, pCtx);
2790#else
2791 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2792#endif
2793}
2794
2795
2796/**
2797 * Sets blocking delivery of NMIs to the guest.
2798 *
2799 * @param pVCpu The cross context virtual CPU structure.
2800 * @param fBlock Whether NMIs are blocked or not.
2801 */
2802VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock)
2803{
2804#ifndef IN_RC
2805 /*
2806 * Set the state of guest-NMI blocking in any of the following cases:
2807 * - We're not executing a nested-guest.
2808 * - We're executing an SVM nested-guest[1].
2809 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2810 *
2811 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2812 * SVM hypervisors must track NMI blocking themselves by intercepting
2813 * the IRET instruction after injection of an NMI.
2814 */
2815 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2816 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2817 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2818 || !CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI))
2819 {
2820 if (fBlock)
2821 {
2822 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2823 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2824 }
2825 else
2826 {
2827 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2828 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2829 }
2830 return;
2831 }
2832
2833 /*
2834 * Set the state of virtual-NMI blocking, if we are executing a
2835 * VMX nested-guest with virtual-NMIs enabled.
2836 */
2837 return CPUMSetGuestVmxVirtNmiBlocking(pVCpu, pCtx, fBlock);
2838#else
2839 if (fBlock)
2840 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2841 else
2842 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2843#endif
2844}
2845
2846
2847/**
2848 * Checks whether the SVM nested-guest has physical interrupts enabled.
2849 *
2850 * @returns true if interrupts are enabled, false otherwise.
2851 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2852 * @param pCtx The guest-CPU context.
2853 *
2854 * @remarks This does -not- take into account the global-interrupt flag.
2855 */
2856VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2857{
2858 /** @todo Optimization: Avoid this function call and use a pointer to the
2859 * relevant eflags instead (setup during VMRUN instruction emulation). */
2860#ifdef IN_RC
2861 RT_NOREF2(pVCpu, pCtx);
2862 AssertReleaseFailedReturn(false);
2863#else
2864 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2865
2866 X86EFLAGS fEFlags;
2867 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2868 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2869 else
2870 fEFlags.u = pCtx->eflags.u;
2871
2872 return fEFlags.Bits.u1IF;
2873#endif
2874}
2875
2876
2877/**
2878 * Checks whether the SVM nested-guest is in a state to receive virtual
2879 * interrupts (set up for injection by the VMRUN instruction).
2880 *
2881 * @returns true if the nested-guest is in a state to receive virtual
2882 * interrupts, false otherwise.
2883 *
2884 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2885 * @param pCtx The guest-CPU context.
2886 */
2887VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2888{
2889#ifdef IN_RC
2890 RT_NOREF2(pVCpu, pCtx);
2891 AssertReleaseFailedReturn(false);
2892#else
2893 RT_NOREF(pVCpu);
2894 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2895
2896 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2897 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2898 Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing virtual-GIF feature to the guest yet. */
2899 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2900 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2901 return false;
2902
2903 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2904#endif
2905}
2906
2907
2908/**
2909 * Gets the pending SVM nested-guest interrupt vector.
2910 *
2911 * @returns The nested-guest interrupt to inject.
2912 * @param pCtx The guest-CPU context.
2913 */
2914VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
2915{
2916#ifdef IN_RC
2917 RT_NOREF(pCtx);
2918 AssertReleaseFailedReturn(0);
2919#else
2920 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2921 return pVmcbCtrl->IntCtrl.n.u8VIntrVector;
2922#endif
2923}
2924
2925
2926/**
2927 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2928 *
2929 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2930 * @param pCtx The guest-CPU context.
2931 */
2932VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx)
2933{
2934 /*
2935 * Reload the guest's "host state".
2936 */
2937 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2938 pCtx->es = pHostState->es;
2939 pCtx->cs = pHostState->cs;
2940 pCtx->ss = pHostState->ss;
2941 pCtx->ds = pHostState->ds;
2942 pCtx->gdtr = pHostState->gdtr;
2943 pCtx->idtr = pHostState->idtr;
2944 CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2945 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2946 pCtx->cr3 = pHostState->uCr3;
2947 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2948 pCtx->rflags = pHostState->rflags;
2949 pCtx->rflags.Bits.u1VM = 0;
2950 pCtx->rip = pHostState->uRip;
2951 pCtx->rsp = pHostState->uRsp;
2952 pCtx->rax = pHostState->uRax;
2953 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2954 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2955 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2956
2957 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2958 * raise \#GP(0) in the guest. */
2959
2960 /** @todo check the loaded host-state for consistency. Figure out what
2961 * exactly this involves? */
2962}
2963
2964
2965/**
2966 * Saves the host-state to the host-state save area as part of a VMRUN.
2967 *
2968 * @param pCtx The guest-CPU context.
2969 * @param cbInstr The length of the VMRUN instruction in bytes.
2970 */
2971VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2972{
2973 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2974 pHostState->es = pCtx->es;
2975 pHostState->cs = pCtx->cs;
2976 pHostState->ss = pCtx->ss;
2977 pHostState->ds = pCtx->ds;
2978 pHostState->gdtr = pCtx->gdtr;
2979 pHostState->idtr = pCtx->idtr;
2980 pHostState->uEferMsr = pCtx->msrEFER;
2981 pHostState->uCr0 = pCtx->cr0;
2982 pHostState->uCr3 = pCtx->cr3;
2983 pHostState->uCr4 = pCtx->cr4;
2984 pHostState->rflags = pCtx->rflags;
2985 pHostState->uRip = pCtx->rip + cbInstr;
2986 pHostState->uRsp = pCtx->rsp;
2987 pHostState->uRax = pCtx->rax;
2988}
2989
2990
2991/**
2992 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
2993 * nested-guest.
2994 *
2995 * @returns The nested-guest TSC after applying any nested-guest TSC offset.
2996 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2997 * @param uTicks The guest TSC.
2998 *
2999 * @sa CPUMRemoveNestedGuestTscOffset.
3000 */
3001VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
3002{
3003#ifndef IN_RC
3004 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3005 if (CPUMIsGuestInVmxNonRootMode(pCtx))
3006 {
3007 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
3008 Assert(pVmcs);
3009 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
3010 return uTicks + pVmcs->u64TscOffset.u;
3011 return uTicks;
3012 }
3013
3014 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3015 {
3016 uint64_t u64TscOffset;
3017 if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
3018 {
3019 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
3020 Assert(pVmcb);
3021 u64TscOffset = pVmcb->ctrl.u64TSCOffset;
3022 }
3023 return uTicks + u64TscOffset;
3024 }
3025#else
3026 RT_NOREF(pVCpu);
3027#endif
3028 return uTicks;
3029}
3030
3031
3032/**
3033 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
3034 * guest.
3035 *
3036 * @returns The guest TSC after removing any nested-guest TSC offset.
3037 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3038 * @param uTicks The nested-guest TSC.
3039 *
3040 * @sa CPUMApplyNestedGuestTscOffset.
3041 */
3042VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
3043{
3044#ifndef IN_RC
3045 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3046 if (CPUMIsGuestInVmxNonRootMode(pCtx))
3047 {
3048 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
3049 {
3050 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
3051 Assert(pVmcs);
3052 return uTicks - pVmcs->u64TscOffset.u;
3053 }
3054 return uTicks;
3055 }
3056
3057 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3058 {
3059 uint64_t u64TscOffset;
3060 if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
3061 {
3062 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
3063 Assert(pVmcb);
3064 u64TscOffset = pVmcb->ctrl.u64TSCOffset;
3065 }
3066 return uTicks - u64TscOffset;
3067 }
3068#else
3069 RT_NOREF(pVCpu);
3070#endif
3071 return uTicks;
3072}
3073
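/*
 * A small sketch of the additive relationship implemented by the two helpers
 * above (hypothetical names): applying and then removing the same offset is a
 * round trip.  A "negative" offset simply wraps in unsigned 64-bit arithmetic,
 * matching the modular addition the CPU performs for TSC offsetting.
 */
#if 0 /* illustration only */
# include <stdint.h>

static uint64_t exampleApplyTscOffset(uint64_t uGuestTsc, uint64_t uTscOffset)
{
    return uGuestTsc + uTscOffset;          /* Nested-guest TSC = guest TSC + offset. */
}

static uint64_t exampleRemoveTscOffset(uint64_t uNestedGuestTsc, uint64_t uTscOffset)
{
    return uNestedGuestTsc - uTscOffset;    /* Back to the guest TSC. */
}
#endif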
3074
3075/**
3076 * Used to dynamically import state residing in NEM or HM.
3077 *
3078 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
3079 *
3080 * @returns VBox status code.
3081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3082 * @param fExtrnImport The fields to import.
3083 * @thread EMT(pVCpu)
3084 */
3085VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPU pVCpu, uint64_t fExtrnImport)
3086{
3087 VMCPU_ASSERT_EMT(pVCpu);
3088 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
3089 {
3090#ifndef IN_RC
3091 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
3092 {
3093 case CPUMCTX_EXTRN_KEEPER_NEM:
3094 {
3095 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
3096 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
3097 return rc;
3098 }
3099
3100 case CPUMCTX_EXTRN_KEEPER_HM:
3101 {
3102#ifdef IN_RING0
3103 int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
3104 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
3105 return rc;
3106#else
3107 AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
3108 return VINF_SUCCESS;
3109#endif
3110 }
3111 default:
3112 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
3113 }
3114#else
3115 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
3116#endif
3117 }
3118 return VINF_SUCCESS;
3119}
3120
3121
3122/**
3123 * Gets valid CR4 bits for the guest.
3124 *
3125 * @returns Valid CR4 bits.
3126 * @param pVM The cross context VM structure.
3127 */
3128VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
3129{
3130 PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
3131 uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
3132 | X86_CR4_TSD | X86_CR4_DE
3133 | X86_CR4_PSE | X86_CR4_PAE
3134 | X86_CR4_MCE | X86_CR4_PGE
3135 | X86_CR4_PCE
3136 | X86_CR4_OSXMMEEXCPT; /** @todo r=ramshankar: Introduced in Pentium III along with SSE. Check fSse here? */
3137 if (pGuestFeatures->fFxSaveRstor)
3138 fMask |= X86_CR4_OSFXSR;
3139 if (pGuestFeatures->fVmx)
3140 fMask |= X86_CR4_VMXE;
3141 if (pGuestFeatures->fXSaveRstor)
3142 fMask |= X86_CR4_OSXSAVE;
3143 if (pGuestFeatures->fPcid)
3144 fMask |= X86_CR4_PCIDE;
3145 if (pGuestFeatures->fFsGsBase)
3146 fMask |= X86_CR4_FSGSBASE;
3147 return fMask;
3148}
3149
3150
3151/**
3152 * Gets the read and write permission bits for an MSR in an MSR bitmap.
3153 *
3154 * @returns VMXMSRPM_XXX - the MSR permission.
3155 * @param pvMsrBitmap Pointer to the MSR bitmap.
3156 * @param idMsr The MSR to get permissions for.
3157 *
3158 * @sa hmR0VmxSetMsrPermission.
3159 */
3160VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
3161{
3162 AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
3163
3164 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
3165
3166 /*
3167 * MSR Layout:
3168 * Byte index MSR range Interpreted as
3169 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
3170 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
3171 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
3172 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
3173 *
3174 * A bit corresponding to an MSR within the above range causes a VM-exit
3175 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls outside
3176 * these ranges, it always causes a VM-exit.
3177 *
3178 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
3179 */
3180 uint32_t const offBitmapRead = 0;
3181 uint32_t const offBitmapWrite = 0x800;
3182 uint32_t offMsr;
3183 uint32_t iBit;
3184 if (idMsr <= UINT32_C(0x00001fff))
3185 {
3186 offMsr = 0;
3187 iBit = idMsr;
3188 }
3189 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
3190 {
3191 offMsr = 0x400;
3192 iBit = idMsr - UINT32_C(0xc0000000);
3193 }
3194 else
3195 {
3196 LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
3197 return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
3198 }
3199
3200 /*
3201 * Get the MSR read permissions.
3202 */
3203 uint32_t fRet;
3204 uint32_t const offMsrRead = offBitmapRead + offMsr;
3205 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
3206 if (ASMBitTest(pbMsrBitmap + offMsrRead, iBit))
3207 fRet = VMXMSRPM_EXIT_RD;
3208 else
3209 fRet = VMXMSRPM_ALLOW_RD;
3210
3211 /*
3212 * Get the MSR write permissions.
3213 */
3214 uint32_t const offMsrWrite = offBitmapWrite + offMsr;
3215 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
3216 if (ASMBitTest(pbMsrBitmap + offMsrWrite, iBit))
3217 fRet |= VMXMSRPM_EXIT_WR;
3218 else
3219 fRet |= VMXMSRPM_ALLOW_WR;
3220
3221 Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
3222 return fRet;
3223}
3224
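/*
 * An illustrative sketch of where in the 4K VMX MSR bitmap the read and write
 * permission bits for a given MSR live, following the layout documented above.
 * The struct and function names are hypothetical and not part of the build.
 */
#if 0 /* illustration only */
# include <stdint.h>
# include <stdbool.h>

typedef struct EXAMPLEMSRBITPOS
{
    bool     fInRange;      /* False if the MSR is outside both covered ranges. */
    uint32_t offReadByte;   /* Byte offset of the read-permission bit. */
    uint32_t offWriteByte;  /* Byte offset of the write-permission bit. */
    uint32_t iBitInByte;    /* Bit number within that byte (0..7). */
} EXAMPLEMSRBITPOS;

static EXAMPLEMSRBITPOS exampleLocateMsrPermBits(uint32_t idMsr)
{
    EXAMPLEMSRBITPOS Pos = { false, 0, 0, 0 };
    uint32_t offMsr;
    uint32_t iBit;
    if (idMsr <= UINT32_C(0x00001fff))                              /* Low MSR range. */
    {
        offMsr = 0;
        iBit   = idMsr;
    }
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))  /* High MSR range. */
    {
        offMsr = 0x400;
        iBit   = idMsr - UINT32_C(0xc0000000);
    }
    else
        return Pos;                                                 /* Out of range: always exits. */

    Pos.fInRange     = true;
    Pos.offReadByte  = 0x000 + offMsr + (iBit >> 3);                /* Read bits start at 0x000. */
    Pos.offWriteByte = 0x800 + offMsr + (iBit >> 3);                /* Write bits start at 0x800. */
    Pos.iBitInByte   = iBit & 7;
    return Pos;
}
#endif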
3225
3226/**
3227 * Gets the permission bits for the specified I/O port from the given I/O bitmaps.
3228 *
3229 * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
3230 * @param pvIoBitmapA Pointer to I/O bitmap A.
3231 * @param pvIoBitmapB Pointer to I/O bitmap B.
3232 * @param uPort The I/O port being accessed.
3233 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3234 */
3235VMM_INT_DECL(bool) CPUMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
3236 uint8_t cbAccess)
3237{
3238 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3239
3240 /*
3241 * If the I/O port access wraps around the 16-bit port I/O space,
3242 * we must cause a VM-exit.
3243 *
3244 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3245 */
3246 /** @todo r=ramshankar: Reading 1, 2 and 4 bytes at ports 0xffff, 0xfffe and 0xfffc
3247 * respectively is valid and does not constitute a wrap-around from what I
3248 * understand. Verify this later. */
3249 uint32_t const uPortLast = uPort + cbAccess;
3250 if (uPortLast > 0x10000)
3251 return true;
3252
3253 /* Read the appropriate bit from the corresponding IO bitmap. */
3254 void const *pvIoBitmap = uPort < 0x8000 ? pvIoBitmapA : pvIoBitmapB;
3255 return ASMBitTest(pvIoBitmap, uPort & 0x7fff); /* Bit index within the selected 4K bitmap; bitmap B covers ports 0x8000..0xffff. */
3256}
3257
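/*
 * A compact sketch of the lookup above (hypothetical name): bitmap A covers
 * ports 0x0000-0x7fff and bitmap B covers 0x8000-0xffff, each 4K in size with
 * one bit per port, and an access running past port 0xffff always exits.
 */
#if 0 /* illustration only */
# include <stdint.h>
# include <stdbool.h>

static bool exampleVmxIoPortCausesExit(uint8_t const *pbIoBitmapA, uint8_t const *pbIoBitmapB,
                                       uint16_t uPort, uint8_t cbAccess)
{
    if ((uint32_t)uPort + cbAccess > UINT32_C(0x10000))     /* Wrap around the 16-bit port space. */
        return true;
    uint8_t  const *pbBitmap = uPort < 0x8000 ? pbIoBitmapA : pbIoBitmapB;
    uint16_t const  idxBit   = uPort & 0x7fff;              /* Bit index within the selected bitmap. */
    return (pbBitmap[idxBit >> 3] >> (idxBit & 7)) & 1;
}
#endif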
3258
3259/**
3260 * Returns whether the given VMCS field is valid and supported for the guest.
3261 *
3262 * @param pVM The cross context VM structure.
3263 * @param u64VmcsField The VMCS field.
3264 *
3265 * @remarks This takes into account the CPU features exposed to the guest.
3266 */
3267VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVM pVM, uint64_t u64VmcsField)
3268{
3269#ifndef IN_RC
3270 uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
3271 uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
3272 if (!uFieldEncHi)
3273 { /* likely */ }
3274 else
3275 return false;
3276
3277 PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
3278 switch (uFieldEncLo)
3279 {
3280 /*
3281 * 16-bit fields.
3282 */
3283 /* Control fields. */
3284 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
3285 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
3286 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
3287
3288 /* Guest-state fields. */
3289 case VMX_VMCS16_GUEST_ES_SEL:
3290 case VMX_VMCS16_GUEST_CS_SEL:
3291 case VMX_VMCS16_GUEST_SS_SEL:
3292 case VMX_VMCS16_GUEST_DS_SEL:
3293 case VMX_VMCS16_GUEST_FS_SEL:
3294 case VMX_VMCS16_GUEST_GS_SEL:
3295 case VMX_VMCS16_GUEST_LDTR_SEL:
3296 case VMX_VMCS16_GUEST_TR_SEL: return true;
3297 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
3298 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
3299
3300 /* Host-state fields. */
3301 case VMX_VMCS16_HOST_ES_SEL:
3302 case VMX_VMCS16_HOST_CS_SEL:
3303 case VMX_VMCS16_HOST_SS_SEL:
3304 case VMX_VMCS16_HOST_DS_SEL:
3305 case VMX_VMCS16_HOST_FS_SEL:
3306 case VMX_VMCS16_HOST_GS_SEL:
3307 case VMX_VMCS16_HOST_TR_SEL: return true;
3308
3309 /*
3310 * 64-bit fields.
3311 */
3312 /* Control fields. */
3313 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
3314 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
3315 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
3316 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
3317 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
3318 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
3319 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
3320 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
3321 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
3322 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
3323 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
3324 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
3325 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
3326 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
3327 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
3328 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
3329 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
3330 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
3331 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
3332 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
3333 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
3334 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
3335 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
3336 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
3337 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
3338 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
3339 case VMX_VMCS64_CTRL_EPTP_FULL:
3340 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
3341 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
3342 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
3343 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
3344 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
3345 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
3346 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
3347 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
3348 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
3349 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
3350 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
3351 {
3352 PCVMCPU pVCpu = &pVM->aCpus[0];
3353 uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
3354 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
3355 }
3356 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
3357 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
3358 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
3359 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
3360 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
3361 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
3362 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
3363 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
3364 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
3365 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
3366 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
3367 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
3368
3369 /* Read-only data fields. */
3370 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
3371 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
3372
3373 /* Guest-state fields. */
3374 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
3375 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
3376 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
3377 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
3378 case VMX_VMCS64_GUEST_PAT_FULL:
3379 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
3380 case VMX_VMCS64_GUEST_EFER_FULL:
3381 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
3382 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
3383 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
3384 case VMX_VMCS64_GUEST_PDPTE0_FULL:
3385 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
3386 case VMX_VMCS64_GUEST_PDPTE1_FULL:
3387 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
3388 case VMX_VMCS64_GUEST_PDPTE2_FULL:
3389 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
3390 case VMX_VMCS64_GUEST_PDPTE3_FULL:
3391 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
3392 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
3393 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
3394
3395 /* Host-state fields. */
3396 case VMX_VMCS64_HOST_PAT_FULL:
3397 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
3398 case VMX_VMCS64_HOST_EFER_FULL:
3399 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
3400 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
3401 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
3402
3403 /*
3404 * 32-bit fields.
3405 */
3406 /* Control fields. */
3407 case VMX_VMCS32_CTRL_PIN_EXEC:
3408 case VMX_VMCS32_CTRL_PROC_EXEC:
3409 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
3410 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
3411 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
3412 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
3413 case VMX_VMCS32_CTRL_EXIT:
3414 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
3415 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
3416 case VMX_VMCS32_CTRL_ENTRY:
3417 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
3418 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
3419 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
3420 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
3421 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
3422 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
3423 case VMX_VMCS32_CTRL_PLE_GAP:
3424 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
3425
3426 /* Read-only data fields. */
3427 case VMX_VMCS32_RO_VM_INSTR_ERROR:
3428 case VMX_VMCS32_RO_EXIT_REASON:
3429 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
3430 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
3431 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
3432 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
3433 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
3434 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
3435
3436 /* Guest-state fields. */
3437 case VMX_VMCS32_GUEST_ES_LIMIT:
3438 case VMX_VMCS32_GUEST_CS_LIMIT:
3439 case VMX_VMCS32_GUEST_SS_LIMIT:
3440 case VMX_VMCS32_GUEST_DS_LIMIT:
3441 case VMX_VMCS32_GUEST_FS_LIMIT:
3442 case VMX_VMCS32_GUEST_GS_LIMIT:
3443 case VMX_VMCS32_GUEST_LDTR_LIMIT:
3444 case VMX_VMCS32_GUEST_TR_LIMIT:
3445 case VMX_VMCS32_GUEST_GDTR_LIMIT:
3446 case VMX_VMCS32_GUEST_IDTR_LIMIT:
3447 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
3448 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
3449 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
3450 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
3451 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
3452 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
3453 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
3454 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
3455 case VMX_VMCS32_GUEST_INT_STATE:
3456 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
3457 case VMX_VMCS32_GUEST_SMBASE:
3458 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
3459 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
3460
3461 /* Host-state fields. */
3462 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
3463
3464 /*
3465 * Natural-width fields.
3466 */
3467 /* Control fields. */
3468 case VMX_VMCS_CTRL_CR0_MASK:
3469 case VMX_VMCS_CTRL_CR4_MASK:
3470 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
3471 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
3472 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
3473 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
3474 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
3475 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
3476
3477 /* Read-only data fields. */
3478 case VMX_VMCS_RO_EXIT_QUALIFICATION:
3479 case VMX_VMCS_RO_IO_RCX:
3480 case VMX_VMCS_RO_IO_RSI:
3481 case VMX_VMCS_RO_IO_RDI:
3482 case VMX_VMCS_RO_IO_RIP:
3483 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
3484
3485 /* Guest-state fields. */
3486 case VMX_VMCS_GUEST_CR0:
3487 case VMX_VMCS_GUEST_CR3:
3488 case VMX_VMCS_GUEST_CR4:
3489 case VMX_VMCS_GUEST_ES_BASE:
3490 case VMX_VMCS_GUEST_CS_BASE:
3491 case VMX_VMCS_GUEST_SS_BASE:
3492 case VMX_VMCS_GUEST_DS_BASE:
3493 case VMX_VMCS_GUEST_FS_BASE:
3494 case VMX_VMCS_GUEST_GS_BASE:
3495 case VMX_VMCS_GUEST_LDTR_BASE:
3496 case VMX_VMCS_GUEST_TR_BASE:
3497 case VMX_VMCS_GUEST_GDTR_BASE:
3498 case VMX_VMCS_GUEST_IDTR_BASE:
3499 case VMX_VMCS_GUEST_DR7:
3500 case VMX_VMCS_GUEST_RSP:
3501 case VMX_VMCS_GUEST_RIP:
3502 case VMX_VMCS_GUEST_RFLAGS:
3503 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
3504 case VMX_VMCS_GUEST_SYSENTER_ESP:
3505 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
3506
3507 /* Host-state fields. */
3508 case VMX_VMCS_HOST_CR0:
3509 case VMX_VMCS_HOST_CR3:
3510 case VMX_VMCS_HOST_CR4:
3511 case VMX_VMCS_HOST_FS_BASE:
3512 case VMX_VMCS_HOST_GS_BASE:
3513 case VMX_VMCS_HOST_TR_BASE:
3514 case VMX_VMCS_HOST_GDTR_BASE:
3515 case VMX_VMCS_HOST_IDTR_BASE:
3516 case VMX_VMCS_HOST_SYSENTER_ESP:
3517 case VMX_VMCS_HOST_SYSENTER_EIP:
3518 case VMX_VMCS_HOST_RSP:
3519 case VMX_VMCS_HOST_RIP: return true;
3520 }
3521
3522 return false;
3523#else
3524 RT_NOREF2(pVM, u64VmcsField);
3525 return false;
3526#endif
3527}
3528
3529
3530/**
3531 * Checks whether the given I/O access should cause a nested-guest VM-exit.
3532 *
3533 * @returns @c true if it causes a VM-exit, @c false otherwise.
3534 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3535 * @param u16Port The I/O port being accessed.
3536 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3537 */
3538VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
3539{
3540#ifndef IN_RC
3541 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3542 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
3543 return true;
3544
3545 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
3546 {
3547 uint8_t const *pbIoBitmapA = (uint8_t const *)pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap);
3548 uint8_t const *pbIoBitmapB = (uint8_t const *)pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap) + VMX_V_IO_BITMAP_A_SIZE;
3549 Assert(pbIoBitmapA);
3550 Assert(pbIoBitmapB);
3551 return CPUMGetVmxIoBitmapPermission(pbIoBitmapA, pbIoBitmapB, u16Port, cbAccess);
3552 }
3553
3554 return false;
3555#else
3556 RT_NOREF3(pVCpu, u16Port, cbAccess);
3557 return false;
3558#endif
3559}
3560
3561
3562/**
3563 * Checks whether the Mov-to-CR3 instruction causes a nested-guest VM-exit.
3564 *
3565 * @returns @c true if it causes a VM-exit, @c false otherwise.
3566 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3567 * @param uNewCr3 The CR3 value being written.
3568 */
3569VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
3570{
3571#ifndef IN_RC
3572 /*
3573 * If the CR3-load exiting control is set and the new CR3 value does not
3574 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
3575 *
3576 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3577 */
3578 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3579 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
3580 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
3581 {
3582 uint32_t const uCr3TargetCount = pVmcs->u32Cr3TargetCount;
3583 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
3584
3585 /* If the CR3-target count is 0, cause a VM-exit. */
3586 if (uCr3TargetCount == 0)
3587 return true;
3588
3589 /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
3590 AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
3591 if ( uNewCr3 != pVmcs->u64Cr3Target0.u
3592 && uNewCr3 != pVmcs->u64Cr3Target1.u
3593 && uNewCr3 != pVmcs->u64Cr3Target2.u
3594 && uNewCr3 != pVmcs->u64Cr3Target3.u)
3595 return true;
3596 }
3597 return false;
3598#else
3599 RT_NOREF2(pVCpu, uNewCr3);
3600 return false;
3601#endif
3602}
3603
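/*
 * A standalone sketch of the CR3-target comparison above (hypothetical name):
 * with CR3-load exiting enabled, the write causes a VM-exit unless the new CR3
 * matches one of the first cCr3Targets target values (zero targets means every
 * write exits).
 */
#if 0 /* illustration only */
# include <stdint.h>
# include <stdbool.h>

static bool exampleCr3WriteCausesExit(bool fCr3LoadExit, uint64_t uNewCr3,
                                      uint64_t const *pauCr3Targets, uint32_t cCr3Targets)
{
    if (!fCr3LoadExit)
        return false;
    for (uint32_t i = 0; i < cCr3Targets; i++)
        if (uNewCr3 == pauCr3Targets[i])
            return false;                      /* Matches a CR3-target value: no VM-exit. */
    return true;                               /* No match (or zero targets): VM-exit. */
}
#endif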
3604
3605/**
3606 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
3607 * VM-exit or not.
3608 *
3609 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
3610 * @param pVCpu The cross context virtual CPU structure.
3611 * @param uExitReason The VM-exit reason (VMX_EXIT_VMREAD or
3612 * VMX_EXIT_VMWRITE).
3613 * @param u64VmcsField The VMCS field.
3614 */
3615VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
3616{
3617#ifndef IN_RC
3618 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
3619 Assert( uExitReason == VMX_EXIT_VMREAD
3620 || uExitReason == VMX_EXIT_VMWRITE);
3621
3622 /*
3623 * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
3624 */
3625 if (!CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VMCS_SHADOWING))
3626 return true;
3627
3628 /*
3629 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
3630 * is intercepted. This excludes any reserved bits in the valid parts of the field
3631 * encoding (i.e. bit 12).
3632 */
3633 if (u64VmcsField & VMX_VMCSFIELD_RSVD_MASK)
3634 return true;
3635
3636 /*
3637 * Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not.
3638 */
3639 uint32_t const u32VmcsField = RT_LO_U32(u64VmcsField);
3640 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
3641 ? (uint8_t const *)pVCpu->cpum.s.Guest.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
3642 : (uint8_t const *)pVCpu->cpum.s.Guest.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
3643 Assert(pbBitmap);
3644 Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
3645 return ASMBitTest(pbBitmap + (u32VmcsField >> 3), u32VmcsField & 7);
3646#else
3647 RT_NOREF3(pVCpu, uExitReason, u64VmcsField);
3648 return false;
3649#endif
3650}
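
/*
 * Editor's note: an illustrative helper (the name is an assumption, not part of
 * the API) making the bit addressing above explicit.  Per the Intel SDM the
 * VMREAD/VMWRITE bitmaps are 4 KB each and the low 15 bits of the VMCS field
 * encoding select the bit to test.
 */
static bool cpumSketchVmreadVmwriteBitmapBit(uint8_t const *pbBitmap, uint32_t u32VmcsField)
{
    uint32_t const idxBit = u32VmcsField & 0x7fff;              /* Bits 14:0 of the field encoding. */
    return (pbBitmap[idxBit >> 3] >> (idxBit & 7)) & 1;
}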
3651
3652
3653
3654/**
3655 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
 3656 *
 * @returns @c true if the I/O access is intercepted, @c false otherwise.
3657 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
3658 * @param u16Port The IO port being accessed.
3659 * @param enmIoType The type of IO access.
3660 * @param cbReg The IO operand size in bytes.
 3661 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
3662 * @param iEffSeg The effective segment number.
3663 * @param fRep Whether this is a repeating IO instruction (REP prefix).
3664 * @param fStrIo Whether this is a string IO instruction.
3665 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO struct to be filled.
3666 * Optional, can be NULL.
3667 */
3668VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
3669 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
3670 PSVMIOIOEXITINFO pIoExitInfo)
3671{
3672 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
3673 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
3674
3675 /*
3676 * The IOPM layout:
3677 * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
3678 * two 4K pages.
3679 *
3680 * For IO instructions that access more than a single byte, the permission bits
3681 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
3682 *
3683 * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
3684 * we need 3 extra bits beyond the second 4K page.
3685 */
3686 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
3687
3688 uint16_t const offIopm = u16Port >> 3;
 3689 uint16_t const fSizeMask = s_auSizeMasks[cbReg & 7];
3690 uint8_t const cShift = u16Port - (offIopm << 3);
3691 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
3692
3693 uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
3694 Assert(pbIopm);
3695 pbIopm += offIopm;
3696 uint16_t const u16Iopm = *(uint16_t *)pbIopm;
3697 if (u16Iopm & fIopmMask)
3698 {
3699 if (pIoExitInfo)
3700 {
3701 static const uint32_t s_auIoOpSize[] =
3702 { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
3703
3704 static const uint32_t s_auIoAddrSize[] =
3705 { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
3706
3707 pIoExitInfo->u = s_auIoOpSize[cbReg & 7];
3708 pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
3709 pIoExitInfo->n.u1Str = fStrIo;
3710 pIoExitInfo->n.u1Rep = fRep;
3711 pIoExitInfo->n.u3Seg = iEffSeg & 7;
3712 pIoExitInfo->n.u1Type = enmIoType;
3713 pIoExitInfo->n.u16Port = u16Port;
3714 }
3715 return true;
3716 }
3717
3718 /** @todo remove later (for debugging as VirtualBox always traps all IO
3719 * intercepts). */
 3720 AssertMsgFailed(("CPUMIsSvmIoInterceptSet: We expect an IO intercept here!\n"));
3721 return false;
3722}
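
/*
 * Editor's note: a worked example (illustrative only) of the IOPM mask
 * computation above, for a 2-byte access to port 0x64:
 *   offIopm   = 0x64 >> 3            = 0x0c   (byte offset into the IOPM)
 *   cShift    = 0x64 - (0x0c << 3)   = 4      (bit position of port 0x64 in that byte)
 *   fSizeMask = s_auSizeMasks[2]     = 3      (two permission bits for a 2-byte access)
 *   fIopmMask = (1 << 4) | (3 << 4)  = 0x30   (bits for ports 0x64 and 0x65)
 * The access is therefore intercepted if bit 4 or bit 5 of the 16-bit word read
 * at IOPM byte offset 0x0c is set.
 */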
3723
3724
3725/**
3726 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
3727 *
3728 * @returns VBox status code.
3729 * @param idMsr The MSR being requested.
3730 * @param pbOffMsrpm Where to store the byte offset in the MSR permission
3731 * bitmap for @a idMsr.
3732 * @param puMsrpmBit Where to store the bit offset starting at the byte
3733 * returned in @a pbOffMsrpm.
3734 */
3735VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
3736{
3737 Assert(pbOffMsrpm);
3738 Assert(puMsrpmBit);
3739
3740 /*
3741 * MSRPM Layout:
3742 * Byte offset MSR range
3743 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
3744 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
3745 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
3746 * 0x1800 - 0x1fff Reserved
3747 *
3748 * Each MSR is represented by 2 permission bits (read and write).
3749 */
3750 if (idMsr <= 0x00001fff)
3751 {
3752 /* Pentium-compatible MSRs. */
3753 uint32_t const bitoffMsr = idMsr << 1;
3754 *pbOffMsrpm = bitoffMsr >> 3;
3755 *puMsrpmBit = bitoffMsr & 7;
3756 return VINF_SUCCESS;
3757 }
3758
3759 if ( idMsr >= 0xc0000000
3760 && idMsr <= 0xc0001fff)
3761 {
3762 /* AMD Sixth Generation x86 Processor MSRs. */
3763 uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
3764 *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
3765 *puMsrpmBit = bitoffMsr & 7;
3766 return VINF_SUCCESS;
3767 }
3768
3769 if ( idMsr >= 0xc0010000
3770 && idMsr <= 0xc0011fff)
3771 {
3772 /* AMD Seventh and Eighth Generation Processor MSRs. */
3773 uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
3774 *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
3775 *puMsrpmBit = bitoffMsr & 7;
3776 return VINF_SUCCESS;
3777 }
3778
3779 *pbOffMsrpm = 0;
3780 *puMsrpmBit = 0;
3781 return VERR_OUT_OF_RANGE;
3782}
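
/*
 * Editor's note: a hypothetical usage sketch (the helper name and the
 * treatment of out-of-range MSRs are assumptions, not part of the CPUM API)
 * showing how a caller could consult a nested-guest MSRPM with the offsets
 * returned above.  Per the AMD APM each MSR has two consecutive permission
 * bits: the lower one intercepts RDMSR, the higher one intercepts WRMSR.
 */
static bool cpumSketchIsSvmMsrIntercepted(uint8_t const *pbMsrpm, uint32_t idMsr, bool fWrite)
{
    uint16_t offMsrpm;
    uint8_t  uMsrpmBit;
    int const rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
    if (RT_FAILURE(rc))
        return true;    /* Conservatively treat MSRs outside the covered ranges as intercepted. */
    uint8_t const uBit = fWrite ? uMsrpmBit + 1 : uMsrpmBit;    /* Read bit, then the adjacent write bit. */
    return (pbMsrpm[offMsrpm] >> uBit) & 1;
}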
3783