VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@46517

Last change on this file since 46517 was 46517, checked in by vboxsync, 12 years ago

VMM/HMVMXR0: Don't need to update guest APIC state (i.e. TPR) for nested page faults that are not MMIO.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 387.0 KB
1/* $Id: HMVMXR0.cpp 46517 2013-06-13 09:27:02Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/asm-amd64-x86.h>
23#include <iprt/thread.h>
24#include <iprt/string.h>
25
26#include "HMInternal.h"
27#include <VBox/vmm/vm.h>
28#include "HWVMXR0.h"
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/dbgf.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/tm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#ifdef DEBUG_ramshankar
38#define HMVMX_SAVE_FULL_GUEST_STATE
39#define HMVMX_SYNC_FULL_GUEST_STATE
40#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
41#define HMVMX_ALWAYS_TRAP_PF
42#endif
43
44
45/*******************************************************************************
46* Defined Constants And Macros *
47*******************************************************************************/
48#define HMVMXHCUINTREG RTHCUINTREG
49#if defined(RT_ARCH_AMD64)
50# define HMVMX_IS_64BIT_HOST_MODE() (true)
51#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
52extern "C" uint32_t g_fVMXIs64bitHost;
53# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
54# undef HMVMXHCUINTREG
55# define HMVMXHCUINTREG uint64_t
56#else
57# define HMVMX_IS_64BIT_HOST_MODE() (false)
58#endif
59
60/** Use the function table. */
61#define HMVMX_USE_FUNCTION_TABLE
62
63/** This bit indicates the segment selector is unusable in VT-x. */
64#define HMVMX_SEL_UNUSABLE RT_BIT(16)
65
66/** Determine which tagged-TLB flush handler to use. */
67#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
68#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
69#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
70#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
71
72/** @name Updated-guest-state flags.
73 * @{ */
74#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
75#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
76#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
77#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
78#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
79#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
80#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
81#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
82#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
83#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
84#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
85#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
86#define HMVMX_UPDATED_GUEST_FS_BASE_MSR RT_BIT(12)
87#define HMVMX_UPDATED_GUEST_GS_BASE_MSR RT_BIT(13)
88#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(14)
89#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(15)
90#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(16)
91#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(17)
92#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(18)
93#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
94#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
95 | HMVMX_UPDATED_GUEST_RSP \
96 | HMVMX_UPDATED_GUEST_RFLAGS \
97 | HMVMX_UPDATED_GUEST_CR0 \
98 | HMVMX_UPDATED_GUEST_CR3 \
99 | HMVMX_UPDATED_GUEST_CR4 \
100 | HMVMX_UPDATED_GUEST_GDTR \
101 | HMVMX_UPDATED_GUEST_IDTR \
102 | HMVMX_UPDATED_GUEST_LDTR \
103 | HMVMX_UPDATED_GUEST_TR \
104 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
105 | HMVMX_UPDATED_GUEST_DEBUG \
106 | HMVMX_UPDATED_GUEST_FS_BASE_MSR \
107 | HMVMX_UPDATED_GUEST_GS_BASE_MSR \
108 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
109 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
110 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
111 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
112 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
113 | HMVMX_UPDATED_GUEST_APIC_STATE)
114/** @} */
115
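/*
 * Illustrative sketch only (not from this file): how the HMVMX_UPDATED_GUEST_* bits
 * are typically used to skip redundant VMCS reads. The per-VCPU tracking field
 * (fUpdatedGuestState), the VMCS field name and the helper name below are
 * assumptions made for the example.
 */
#if 0
static int hmR0VmxExampleSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
    int rc = VINF_SUCCESS;
    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))   /* Only read the VMCS once per exit. */
    {
        uint64_t u64Val;
        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);                 /* Pull RIP out of the VMCS. */
        AssertRCReturn(rc, rc);
        pMixedCtx->rip = u64Val;
        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;     /* Mark RIP as up to date. */
    }
    return rc;
}
#endif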
116/**
117 * Flags to skip redundant reads of some common VMCS fields that are not part of
118 * the guest-CPU state but are in the transient structure.
119 */
120#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
121#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
122#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
123#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
124#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
125#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
126
127/**
128 * Exception bitmap mask for real-mode guests (real-on-v86). We need to intercept all exceptions manually (except #PF).
129 * #NM is also handled separately, see hmR0VmxLoadGuestControlRegs(). #PF need not be intercepted even in real-mode if
130 * we have Nested Paging support.
131 */
132#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
133 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
134 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
135 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
136 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
137 | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
138 | RT_BIT(X86_XCPT_XF))
139
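/*
 * Illustrative sketch only: how a mask like HMVMX_REAL_MODE_XCPT_MASK is typically
 * folded into the VMCS exception bitmap when the guest enters real-on-v86 mode.
 * The u32XcptBitmap field and the VMX_VMCS32_CTRL_EXCEPTION_BITMAP encoding are
 * assumptions made for the example.
 */
#if 0
static void hmR0VmxExampleLoadRealModeXcptBitmap(PVM pVM, PVMCPU pVCpu)
{
    uint32_t u32XcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap;    /* Currently active exception intercepts. */
    u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;                /* Intercept what the real-on-v86 monitor must see. */
    if (pVM->hm.s.fNestedPaging)
        u32XcptBitmap &= ~RT_BIT(X86_XCPT_PF);                 /* #PF need not be intercepted with Nested Paging. */
    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
    AssertRC(rc);
}
#endif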
140/**
141 * Exception bitmap mask for all contributory exceptions.
142 */
143#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
144 | RT_BIT(X86_XCPT_DE))
145
146/** Maximum VM-instruction error number. */
147#define HMVMX_INSTR_ERROR_MAX 28
148
149/** Profiling macro. */
150#ifdef HM_PROFILE_EXIT_DISPATCH
151# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
152# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
153#else
154# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
155# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
156#endif
157
158
159/*******************************************************************************
160* Structures and Typedefs *
161*******************************************************************************/
162/** @name VMX transient.
163 *
164 * A state structure for holding miscellaneous information across
165 * VMX non-root operation and restored after the transition.
166 *
167 * @{ */
168typedef struct VMXTRANSIENT
169{
170 /** The host's rflags/eflags. */
171 RTCCUINTREG uEFlags;
172#if HC_ARCH_BITS == 32
173 uint32_t u32Alignment0;
174#endif
175 /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
176 uint64_t u64LStarMsr;
177 /** The guest's TPR value used for TPR shadowing. */
178 uint8_t u8GuestTpr;
179 /** Alignment. */
180 uint8_t abAlignment0[6];
181
182 /** The basic VM-exit reason. */
183 uint16_t uExitReason;
184 /** Alignment. */
185 uint16_t u16Alignment0;
186 /** The VM-exit interruption error code. */
187 uint32_t uExitIntrErrorCode;
188 /** The VM-exit exit qualification. */
189 uint64_t uExitQualification;
190
191 /** The VM-exit interruption-information field. */
192 uint32_t uExitIntrInfo;
193 /** The VM-exit instruction-length field. */
194 uint32_t cbInstr;
195 /** Whether the VM-entry failed or not. */
196 bool fVMEntryFailed;
197 /** Alignment. */
198 uint8_t abAlignment1[5];
199
200 /** The VM-entry interruption-information field. */
201 uint32_t uEntryIntrInfo;
202 /** The VM-entry exception error code field. */
203 uint32_t uEntryXcptErrorCode;
204 /** The VM-entry instruction length field. */
205 uint32_t cbEntryInstr;
206
207 /** IDT-vectoring information field. */
208 uint32_t uIdtVectoringInfo;
209 /** IDT-vectoring error code. */
210 uint32_t uIdtVectoringErrorCode;
211
212 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
213 uint32_t fVmcsFieldsRead;
214 /** Whether TSC-offsetting should be setup before VM-entry. */
215 bool fUpdateTscOffsettingAndPreemptTimer;
216 /** Whether the VM-exit was caused by a page-fault during delivery of a
217 * contributory exception or a page-fault. */
218 bool fVectoringPF;
219} VMXTRANSIENT, *PVMXTRANSIENT;
220AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
221AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntrInfo, sizeof(uint64_t));
222AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntrInfo, sizeof(uint64_t));
223/** @} */
224
225
226/**
227 * MSR-bitmap read permissions.
228 */
229typedef enum VMXMSREXITREAD
230{
231 /** Reading this MSR causes a VM-exit. */
232 VMXMSREXIT_INTERCEPT_READ = 0xb,
233 /** Reading this MSR does not cause a VM-exit. */
234 VMXMSREXIT_PASSTHRU_READ
235} VMXMSREXITREAD;
236
237/**
238 * MSR-bitmap write permissions.
239 */
240typedef enum VMXMSREXITWRITE
241{
242 /** Writing to this MSR causes a VM-exit. */
243 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
244 /** Writing to this MSR does not cause a VM-exit. */
245 VMXMSREXIT_PASSTHRU_WRITE
246} VMXMSREXITWRITE;
247
248
249/*******************************************************************************
250* Internal Functions *
251*******************************************************************************/
252static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
253static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
254static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
255 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState);
256#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
257static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
258#endif
259#ifndef HMVMX_USE_FUNCTION_TABLE
260DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
261#define HMVMX_EXIT_DECL static int
262#else
263#define HMVMX_EXIT_DECL static DECLCALLBACK(int)
264#endif
265
266HMVMX_EXIT_DECL hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
267HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
268HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
269HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
270HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
271HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
272HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
273HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
274HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
275HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
276HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
277HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
278HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
279HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
280HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
281HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
282HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
283HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
284HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
285HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
286HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
287HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
288HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
289HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
290HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
291HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
292HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
293HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
294HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
295HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
296HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
297HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
298HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
299HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
300HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
302HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
303HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
304HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
305HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
306HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
307HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
308HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
309HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
310
311static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
312static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
313static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
314static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
315static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
316static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
317static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
318
319
320/*******************************************************************************
321* Global Variables *
322*******************************************************************************/
323#ifdef HMVMX_USE_FUNCTION_TABLE
324/**
325 * VM-exit handler.
326 *
327 * @returns VBox status code.
328 * @param pVCpu Pointer to the VMCPU.
329 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
330 * out-of-sync. Make sure to update the required
331 * fields before using them.
332 * @param pVmxTransient Pointer to the VMX-transient structure.
333 */
334typedef DECLCALLBACK(int) FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
335/** Pointer to VM-exit handler. */
336typedef FNVMEXITHANDLER *const PFNVMEXITHANDLER;
337
338/**
339 * VMX_EXIT dispatch table.
340 */
341static const PFNVMEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
342{
343 /* 00 VMX_EXIT_XCPT_NMI */ hmR0VmxExitXcptNmi,
344 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
345 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
346 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
347 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
348 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
349 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
350 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
351 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
352 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
353 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
354 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
355 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
356 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
357 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
358 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
359 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
360 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
361 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitSetPendingXcptUD,
362 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
363 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
364 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
365 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
366 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
367 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
368 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
369 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
370 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
371 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
372 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
373 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
374 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
375 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
376 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
377 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
378 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
379 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
380 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
381 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
382 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
383 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
384 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
385 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
386 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
387 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
388 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
389 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
390 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
391 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
392 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
393 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
394 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
395 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
396 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
397 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
398 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
399 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
400 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
401 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
402 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD
403};
404#endif /* HMVMX_USE_FUNCTION_TABLE */
405
406#ifdef VBOX_STRICT
407static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
408{
409 /* 0 */ "(Not Used)",
410 /* 1 */ "VMCALL executed in VMX root operation.",
411 /* 2 */ "VMCLEAR with invalid physical address.",
412 /* 3 */ "VMCLEAR with VMXON pointer.",
413 /* 4 */ "VMLAUNCH with non-clear VMCS.",
414 /* 5 */ "VMRESUME with non-launched VMCS.",
415 /* 6 */ "VMRESUME after VMXOFF",
416 /* 7 */ "VM entry with invalid control fields.",
417 /* 8 */ "VM entry with invalid host state fields.",
418 /* 9 */ "VMPTRLD with invalid physical address.",
419 /* 10 */ "VMPTRLD with VMXON pointer.",
420 /* 11 */ "VMPTRLD with incorrect revision identifier.",
421 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
422 /* 13 */ "VMWRITE to read-only VMCS component.",
423 /* 14 */ "(Not Used)",
424 /* 15 */ "VMXON executed in VMX root operation.",
425 /* 16 */ "VM entry with invalid executive-VMCS pointer.",
426 /* 17 */ "VM entry with non-launched executing VMCS.",
427 /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
428 /* 19 */ "VMCALL with non-clear VMCS.",
429 /* 20 */ "VMCALL with invalid VM-exit control fields.",
430 /* 21 */ "(Not Used)",
431 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
432 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
433 /* 24 */ "VMCALL with invalid SMM-monitor features.",
434 /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
435 /* 26 */ "VM entry with events blocked by MOV SS.",
436 /* 27 */ "(Not Used)",
437 /* 28 */ "Invalid operand to INVEPT/INVVPID."
438};
439#endif /* VBOX_STRICT */
440
441
442
443/**
444 * Updates the VM's last error record. If there was a VMX instruction error,
445 * reads the error data from the VMCS and updates VCPU's last error record as
446 * well.
447 *
448 * @param pVM Pointer to the VM.
449 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
450 * VERR_VMX_UNABLE_TO_START_VM or
451 * VERR_VMX_INVALID_VMCS_FIELD).
452 * @param rc The error code.
453 */
454static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
455{
456 AssertPtr(pVM);
457 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
458 || rc == VERR_VMX_UNABLE_TO_START_VM)
459 {
460 AssertPtrReturnVoid(pVCpu);
461 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
462 }
463 pVM->hm.s.lLastError = rc;
464}
465
466
467/**
468 * Reads the VM-entry interruption-information field from the VMCS into the VMX
469 * transient structure.
470 *
471 * @returns VBox status code.
472 * @param pVmxTransient Pointer to the VMX transient structure.
473 *
474 * @remarks No-long-jump zone!!!
475 */
476DECLINLINE(int) hmR0VmxReadEntryIntrInfoVmcs(PVMXTRANSIENT pVmxTransient)
477{
478 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntrInfo);
479 AssertRCReturn(rc, rc);
480 return VINF_SUCCESS;
481}
482
483
484/**
485 * Reads the VM-entry exception error code field from the VMCS into
486 * the VMX transient structure.
487 *
488 * @returns VBox status code.
489 * @param pVmxTransient Pointer to the VMX transient structure.
490 *
491 * @remarks No-long-jump zone!!!
492 */
493DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
494{
495 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
496 AssertRCReturn(rc, rc);
497 return VINF_SUCCESS;
498}
499
500
501/**
502 * Reads the VM-entry instruction length field from the VMCS into
503 * the VMX transient structure.
504 *
505 * @returns VBox status code.
506 * @param pVCpu Pointer to the VMCPU.
507 * @param pVmxTransient Pointer to the VMX transient structure.
508 *
509 * @remarks No-long-jump zone!!!
510 */
511DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
512{
513 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
514 AssertRCReturn(rc, rc);
515 return VINF_SUCCESS;
516}
517
518
519/**
520 * Reads the VM-exit interruption-information field from the VMCS into the VMX
521 * transient structure.
522 *
523 * @returns VBox status code.
524 * @param pVCpu Pointer to the VMCPU.
525 * @param pVmxTransient Pointer to the VMX transient structure.
526 */
527DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
528{
529 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
530 {
531 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
532 AssertRCReturn(rc, rc);
533 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
534 }
535 return VINF_SUCCESS;
536}
537
538
539/**
540 * Reads the VM-exit interruption error code from the VMCS into the VMX
541 * transient structure.
542 *
543 * @returns VBox status code.
544 * @param pVCpu Pointer to the VMCPU.
545 * @param pVmxTransient Pointer to the VMX transient structure.
546 */
547DECLINLINE(int) hmR0VmxReadExitIntrErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
548{
549 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
550 {
551 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntrErrorCode);
552 AssertRCReturn(rc, rc);
553 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
554 }
555 return VINF_SUCCESS;
556}
557
558
559/**
560 * Reads the VM-exit instruction length field from the VMCS into the VMX
561 * transient structure.
562 *
563 * @returns VBox status code.
564 * @param pVCpu Pointer to the VMCPU.
565 * @param pVmxTransient Pointer to the VMX transient structure.
566 */
567DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
568{
569 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
570 {
571 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
572 AssertRCReturn(rc, rc);
573 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
574 }
575 return VINF_SUCCESS;
576}
577
578
579/**
580 * Reads the exit qualification from the VMCS into the VMX transient structure.
581 *
582 * @returns VBox status code.
583 * @param pVCpu Pointer to the VMCPU.
584 * @param pVmxTransient Pointer to the VMX transient structure.
585 */
586DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
587{
588 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
589 {
590 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification);
591 AssertRCReturn(rc, rc);
592 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
593 }
594 return VINF_SUCCESS;
595}
596
597
598/**
599 * Reads the IDT-vectoring information field from the VMCS into the VMX
600 * transient structure.
601 *
602 * @returns VBox status code.
603 * @param pVmxTransient Pointer to the VMX transient structure.
604 *
605 * @remarks No-long-jump zone!!!
606 */
607DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
608{
609 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
610 {
611 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
612 AssertRCReturn(rc, rc);
613 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
614 }
615 return VINF_SUCCESS;
616}
617
618
619/**
620 * Reads the IDT-vectoring error code from the VMCS into the VMX
621 * transient structure.
622 *
623 * @returns VBox status code.
624 * @param pVmxTransient Pointer to the VMX transient structure.
625 */
626DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
627{
628 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
629 {
630 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
631 AssertRCReturn(rc, rc);
632 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
633 }
634 return VINF_SUCCESS;
635}
636
637
638/**
639 * Enters VMX root mode operation on the current CPU.
640 *
641 * @returns VBox status code.
642 * @param pVM Pointer to the VM (optional, can be NULL, after
643 * a resume).
644 * @param HCPhysCpuPage Physical address of the VMXON region.
645 * @param pvCpuPage Pointer to the VMXON region.
646 */
647static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
648{
649 AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
650 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
651 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
652
653 if (pVM)
654 {
655 /* Write the VMCS revision dword to the VMXON region. */
656 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
657 }
658
659 /* Enable the VMX bit in CR4 if necessary. */
660 RTCCUINTREG uCr4 = ASMGetCR4();
661 if (!(uCr4 & X86_CR4_VMXE))
662 ASMSetCR4(uCr4 | X86_CR4_VMXE);
663
664 /* Enter VMX root mode. */
665 int rc = VMXEnable(HCPhysCpuPage);
666 if (RT_FAILURE(rc))
667 ASMSetCR4(uCr4);
668
669 return rc;
670}
671
672
673/**
674 * Exits VMX root mode operation on the current CPU.
675 *
676 * @returns VBox status code.
677 */
678static int hmR0VmxLeaveRootMode(void)
679{
680 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
681
682 /* If we're for some reason not in VMX root mode, then don't leave it. */
683 RTCCUINTREG uHostCR4 = ASMGetCR4();
684 if (uHostCR4 & X86_CR4_VMXE)
685 {
686 /* Exit VMX root mode and clear the VMX bit in CR4. */
687 VMXDisable();
688 ASMSetCR4(uHostCR4 & ~X86_CR4_VMXE);
689 return VINF_SUCCESS;
690 }
691
692 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
693}
694
695
696/**
697 * Allocates and maps one physically contiguous page. The allocated page is
698 * zero'd out. (Used by various VT-x structures).
699 *
700 * @returns IPRT status code.
701 * @param pMemObj Pointer to the ring-0 memory object.
702 * @param ppVirt Where to store the virtual address of the
703 * allocation.
704 * @param pHCPhys Where to store the physical address of the
705 * allocation.
706 */
707DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
708{
709 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
710 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
711 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
712
713 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
714 if (RT_FAILURE(rc))
715 return rc;
716 *ppVirt = RTR0MemObjAddress(*pMemObj);
717 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
718 ASMMemZero32(*ppVirt, PAGE_SIZE);
719 return VINF_SUCCESS;
720}
721
722
723/**
724 * Frees and unmaps an allocated physical page.
725 *
726 * @param pMemObj Pointer to the ring-0 memory object.
727 * @param ppVirt Where to re-initialize the virtual address of
728 * the allocation as 0.
729 * @param pHCPhys Where to re-initialize the physical address of the
730 * allocation as 0.
731 */
732DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
733{
734 AssertPtr(pMemObj);
735 AssertPtr(ppVirt);
736 AssertPtr(pHCPhys);
737 if (*pMemObj != NIL_RTR0MEMOBJ)
738 {
739 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
740 AssertRC(rc);
741 *pMemObj = NIL_RTR0MEMOBJ;
742 *ppVirt = 0;
743 *pHCPhys = 0;
744 }
745}
746
747
748/**
749 * Worker function to free VT-x related structures.
750 *
751 * @returns IPRT status code.
752 * @param pVM Pointer to the VM.
753 */
754static void hmR0VmxStructsFree(PVM pVM)
755{
756 for (VMCPUID i = 0; i < pVM->cCpus; i++)
757 {
758 PVMCPU pVCpu = &pVM->aCpus[i];
759 AssertPtr(pVCpu);
760
761#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
762 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
763 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
764#endif
765
766 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
767 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
768
769 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
770 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
771 }
772
773 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
774#ifdef VBOX_WITH_CRASHDUMP_MAGIC
775 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
776#endif
777}
778
779
780/**
781 * Worker function to allocate VT-x related VM structures.
782 *
783 * @returns IPRT status code.
784 * @param pVM Pointer to the VM.
785 */
786static int hmR0VmxStructsAlloc(PVM pVM)
787{
788 /*
789 * Initialize members up-front so we can cleanup properly on allocation failure.
790 */
791#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
792 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
793 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
794 pVM->hm.s.vmx.HCPhys##a_Name = 0;
795
796#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
797 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
798 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
799 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
800
801#ifdef VBOX_WITH_CRASHDUMP_MAGIC
802 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
803#endif
804 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
805
806 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
807 for (VMCPUID i = 0; i < pVM->cCpus; i++)
808 {
809 PVMCPU pVCpu = &pVM->aCpus[i];
810 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
811 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
812 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
813#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
814 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
815 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
816#endif
817 }
818#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
819#undef VMXLOCAL_INIT_VM_MEMOBJ
820
821 /*
822 * Allocate all the VT-x structures.
823 */
824 int rc = VINF_SUCCESS;
825#ifdef VBOX_WITH_CRASHDUMP_MAGIC
826 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
827 if (RT_FAILURE(rc))
828 goto cleanup;
829 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
830 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
831#endif
832
833 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
834 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
835 {
836 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
837 &pVM->hm.s.vmx.HCPhysApicAccess);
838 if (RT_FAILURE(rc))
839 goto cleanup;
840 }
841
842 /*
843 * Initialize per-VCPU VT-x structures.
844 */
845 for (VMCPUID i = 0; i < pVM->cCpus; i++)
846 {
847 PVMCPU pVCpu = &pVM->aCpus[i];
848 AssertPtr(pVCpu);
849
850 /* Allocate the VM control structure (VMCS). */
851 AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
852 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
853 if (RT_FAILURE(rc))
854 goto cleanup;
855
856 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
857 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
858 {
859 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
860 &pVCpu->hm.s.vmx.HCPhysVirtApic);
861 if (RT_FAILURE(rc))
862 goto cleanup;
863 }
864
865 /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
866 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
867 {
868 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
869 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
870 if (RT_FAILURE(rc))
871 goto cleanup;
872 memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
873 }
874
875#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
876 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
877 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
878 if (RT_FAILURE(rc))
879 goto cleanup;
880
881 /* Allocate the VM-exit MSR-load page for the host MSRs. */
882 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
883 if (RT_FAILURE(rc))
884 goto cleanup;
885#endif
886 }
887
888 return VINF_SUCCESS;
889
890cleanup:
891 hmR0VmxStructsFree(pVM);
892 return rc;
893}
894
895
896/**
897 * Does global VT-x initialization (called during module initialization).
898 *
899 * @returns VBox status code.
900 */
901VMMR0DECL(int) VMXR0GlobalInit(void)
902{
903#ifdef HMVMX_USE_FUNCTION_TABLE
904 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
905# ifdef VBOX_STRICT
906 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
907 Assert(g_apfnVMExitHandlers[i]);
908# endif
909#endif
910 return VINF_SUCCESS;
911}
912
913
914/**
915 * Does global VT-x termination (called during module termination).
916 */
917VMMR0DECL(void) VMXR0GlobalTerm()
918{
919 /* Nothing to do currently. */
920}
921
922
923/**
924 * Sets up and activates VT-x on the current CPU.
925 *
926 * @returns VBox status code.
927 * @param pCpu Pointer to the global CPU info struct.
928 * @param pVM Pointer to the VM (can be NULL after a host resume
929 * operation).
930 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
931 * fEnabledByHost is true).
932 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
933 * @a fEnabledByHost is true).
934 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
935 * enable VT-x on the host.
936 */
937VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
938{
939 AssertReturn(pCpu, VERR_INVALID_PARAMETER);
940 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
941
942 if (!fEnabledByHost)
943 {
944 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
945 if (RT_FAILURE(rc))
946 return rc;
947 }
948
949 /*
950 * Flush all EPTP tagged-TLB entries (in case any other hypervisor has been using EPTPs) so that
951 * we can avoid an explicit flush while using new VPIDs. We would still need to flush
952 * each time while reusing a VPID after hitting the MaxASID limit once.
953 */
954 if ( pVM
955 && pVM->hm.s.fNestedPaging)
956 {
957 /* We require ALL_CONTEXT flush-type to be available on the CPU. See hmR0VmxSetupTaggedTlb(). */
958 Assert(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
959 hmR0VmxFlushEpt(pVM, NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
960 pCpu->fFlushAsidBeforeUse = false;
961 }
962 else
963 {
964 /** @todo This is still not perfect. If on host resume (pVM is NULL or a VM
965 * without Nested Paging triggered this function) we still have the risk
966 * of potentially running with stale TLB-entries from other hypervisors
967 * when later we use a VM with NestedPaging. To fix this properly we will
968 * have to pass '&g_HvmR0' (see HMR0.cpp) to this function and read
969 * 'vmx_ept_vpid_caps' from it. Sigh. */
970 pCpu->fFlushAsidBeforeUse = true;
971 }
972
973 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
974 ++pCpu->cTlbFlushes;
975
976 return VINF_SUCCESS;
977}
978
979
980/**
981 * Deactivates VT-x on the current CPU.
982 *
983 * @returns VBox status code.
984 * @param pCpu Pointer to the global CPU info struct.
985 * @param pvCpuPage Pointer to the VMXON region.
986 * @param HCPhysCpuPage Physical address of the VMXON region.
987 *
988 * @remarks This function should never be called when SUPR0EnableVTx() or
989 * similar was used to enable VT-x on the host.
990 */
991VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
992{
993 NOREF(pCpu);
994 NOREF(pvCpuPage);
995 NOREF(HCPhysCpuPage);
996
997 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
998 return hmR0VmxLeaveRootMode();
999}
1000
1001
1002/**
1003 * Sets the permission bits for the specified MSR in the MSR bitmap.
1004 *
1005 * @param pVCpu Pointer to the VMCPU.
1006 * @param uMsr The MSR value.
1007 * @param enmRead Whether reading this MSR causes a VM-exit.
1008 * @param enmWrite Whether writing this MSR causes a VM-exit.
1009 */
1010static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1011{
1012 int32_t iBit;
1013 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1014
1015 /*
1016 * Layout:
1017 * 0x000 - 0x3ff - Low MSR read bits
1018 * 0x400 - 0x7ff - High MSR read bits
1019 * 0x800 - 0xbff - Low MSR write bits
1020 * 0xc00 - 0xfff - High MSR write bits
1021 */
1022 if (uMsr <= 0x00001FFF)
1023 iBit = uMsr;
1024 else if ( uMsr >= 0xC0000000
1025 && uMsr <= 0xC0001FFF)
1026 {
1027 iBit = (uMsr - 0xC0000000);
1028 pbMsrBitmap += 0x400;
1029 }
1030 else
1031 {
1032 AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1033 return;
1034 }
1035
1036 Assert(iBit <= 0x1fff);
1037 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1038 ASMBitSet(pbMsrBitmap, iBit);
1039 else
1040 ASMBitClear(pbMsrBitmap, iBit);
1041
1042 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1043 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1044 else
1045 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1046}
1047
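/*
 * Illustrative sketch only: a typical way hmR0VmxSetMsrPermission() above is used to
 * let the guest access a few hot MSRs without VM-exits while keeping EFER writes
 * intercepted. The MSR_IA32_SYSENTER_*, MSR_K8_LSTAR and MSR_K6_EFER constants are
 * assumed to come from the IPRT x86 headers; they are not defined in this file.
 */
#if 0
static void hmR0VmxExampleSetupMsrPassthru(PVMCPU pVCpu)
{
    hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,  VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR,          VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    /* Keep EFER writes intercepted so the VMM can track long-mode transitions. */
    hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER,           VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
}
#endif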
1048
1049/**
1050 * Flushes the TLB using EPT.
1051 *
1052 * @returns VBox status code.
1053 * @param pVM Pointer to the VM.
1054 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1055 * enmFlush).
1056 * @param enmFlush Type of flush.
1057 */
1058static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
1059{
1060 AssertPtr(pVM);
1061 Assert(pVM->hm.s.fNestedPaging);
1062
1063 uint64_t descriptor[2];
1064 if (enmFlush == VMX_FLUSH_EPT_ALL_CONTEXTS)
1065 descriptor[0] = 0;
1066 else
1067 {
1068 Assert(pVCpu);
1069 descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1070 }
1071 descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1072
1073 int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
1074 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1075 rc));
1076 if ( RT_SUCCESS(rc)
1077 && pVCpu)
1078 {
1079 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1080 }
1081}
1082
1083
1084/**
1085 * Flushes the TLB using VPID.
1086 *
1087 * @returns VBox status code.
1088 * @param pVM Pointer to the VM.
1089 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1090 * enmFlush).
1091 * @param enmFlush Type of flush.
1092 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1093 * on @a enmFlush).
1094 */
1095static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
1096{
1097 AssertPtr(pVM);
1098 Assert(pVM->hm.s.vmx.fVpid);
1099
1100 uint64_t descriptor[2];
1101 if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
1102 {
1103 descriptor[0] = 0;
1104 descriptor[1] = 0;
1105 }
1106 else
1107 {
1108 AssertPtr(pVCpu);
1109 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1110 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1111 descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1112 descriptor[1] = GCPtr;
1113 }
1114
1115 int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
1116 AssertMsg(rc == VINF_SUCCESS,
1117 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1118 if ( RT_SUCCESS(rc)
1119 && pVCpu)
1120 {
1121 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1122 }
1123}
1124
1125
1126/**
1127 * Invalidates a guest page by guest virtual address. Only relevant for
1128 * EPT/VPID, otherwise there is nothing really to invalidate.
1129 *
1130 * @returns VBox status code.
1131 * @param pVM Pointer to the VM.
1132 * @param pVCpu Pointer to the VMCPU.
1133 * @param GCVirt Guest virtual address of the page to invalidate.
1134 */
1135VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1136{
1137 AssertPtr(pVM);
1138 AssertPtr(pVCpu);
1139 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1140
1141 bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1142 if (!fFlushPending)
1143 {
1144 /*
1145 * We must invalidate the guest TLB entry in either case, we cannot ignore it even for the EPT case
1146 * See @bugref{6043} and @bugref{6177}.
1147 *
1148 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
1149 * function may be called in a loop with individual addresses.
1150 */
1151 if (pVM->hm.s.vmx.fVpid)
1152 {
1153 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1154 {
1155 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
1156 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1157 }
1158 else
1159 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1160 }
1161 else if (pVM->hm.s.fNestedPaging)
1162 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1163 }
1164
1165 return VINF_SUCCESS;
1166}
1167
1168
1169/**
1170 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1171 * otherwise there is nothing really to invalidate.
1172 *
1173 * @returns VBox status code.
1174 * @param pVM Pointer to the VM.
1175 * @param pVCpu Pointer to the VMCPU.
1176 * @param GCPhys Guest physical address of the page to invalidate.
1177 */
1178VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1179{
1180 LogFlowFunc(("%RGp\n", GCPhys));
1181
1182 /*
1183 * We cannot flush a page by guest-physical address: invvpid takes only a linear address, while invept flushes only
1184 * by EPT context, not individual addresses. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*().
1185 * This function might be called in a loop. This should cause a flush-by-EPT if EPT is in use. See @bugref{6568}.
1186 */
1187 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1188 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1189 return VINF_SUCCESS;
1190}
1191
1192
1193/**
1194 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1195 * case where neither EPT nor VPID is supported by the CPU.
1196 *
1197 * @param pVM Pointer to the VM.
1198 * @param pVCpu Pointer to the VMCPU.
1199 *
1200 * @remarks Called with interrupts disabled.
1201 */
1202static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
1203{
1204 NOREF(pVM);
1205 AssertPtr(pVCpu);
1206 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1207 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1208
1209 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1210 AssertPtr(pCpu);
1211
1212 pVCpu->hm.s.TlbShootdown.cPages = 0;
1213 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1214 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1215 pVCpu->hm.s.fForceTLBFlush = false;
1216 return;
1217}
1218
1219
1220/**
1221 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1222 *
1223 * @param pVM Pointer to the VM.
1224 * @param pVCpu Pointer to the VMCPU.
1225 * @remarks All references to "ASID" in this function pertain to "VPID" in
1226 * Intel's nomenclature. The reason is to avoid confusion in compare
1227 * statements since the host-CPU copies are named "ASID".
1228 *
1229 * @remarks Called with interrupts disabled.
1230 */
1231static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
1232{
1233#ifdef VBOX_WITH_STATISTICS
1234 bool fTlbFlushed = false;
1235# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1236# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1237 if (!fTlbFlushed) \
1238 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1239 } while (0)
1240#else
1241# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1242# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1243#endif
1244
1245 AssertPtr(pVM);
1246 AssertPtr(pVCpu);
1247 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1248 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1249 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1250
1251 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1252 AssertPtr(pCpu);
1253
1254 /*
1255 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1256 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1257 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1258 */
1259 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1260 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1261 {
1262 ++pCpu->uCurrentAsid;
1263 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1264 {
1265 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1266 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1267 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1268 }
1269
1270 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1271 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1272 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1273
1274 /*
1275 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1276 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1277 */
1278 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1279 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1280 HMVMX_SET_TAGGED_TLB_FLUSHED();
1281 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1282 }
1283
1284 /* Check for explicit TLB shootdowns. */
1285 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1286 {
1287 /*
1288 * Changes to the EPT paging structure by the VMM require flushing by EPT as the CPU creates
1289 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
1290 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
1291 * but not guest-physical mappings.
1292 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
1293 */
1294 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1295 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1296 HMVMX_SET_TAGGED_TLB_FLUSHED();
1297 }
1298
1299 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1300 * not be executed. See hmQueueInvlPage() where it is commented
1301 * out. Support individual entry flushing someday. */
1302 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1303 {
1304 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1305
1306 /*
1307 * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
1308 * as supported by the CPU.
1309 */
1310 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1311 {
1312 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1313 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1314 }
1315 else
1316 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1317
1318 HMVMX_SET_TAGGED_TLB_FLUSHED();
1319 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1320 }
1321
1322 pVCpu->hm.s.TlbShootdown.cPages = 0;
1323 pVCpu->hm.s.fForceTLBFlush = false;
1324
1325 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
1326
1327 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
1328 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
1329 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1330 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1331 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1332 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1333 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1334 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1335
1336 /* Update VMCS with the VPID. */
1337 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1338 AssertRC(rc);
1339
1340#undef HMVMX_SET_TAGGED_TLB_FLUSHED
1341}
1342
1343
1344/**
1345 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
1346 *
1347 * @returns VBox status code.
1348 * @param pVM Pointer to the VM.
1349 * @param pVCpu Pointer to the VMCPU.
1350 *
1351 * @remarks Called with interrupts disabled.
1352 */
1353static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
1354{
1355 AssertPtr(pVM);
1356 AssertPtr(pVCpu);
1357 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
1358 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
1359
1360 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1361 AssertPtr(pCpu);
1362
1363 /*
1364 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1365 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
1366 */
1367 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1368 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1369 {
1370 pVCpu->hm.s.fForceTLBFlush = true;
1371 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1372 }
1373
1374 /* Check for explicit TLB shootdown flushes. */
1375 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1376 {
1377 pVCpu->hm.s.fForceTLBFlush = true;
1378 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1379 }
1380
1381 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1382 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1383
1384 if (pVCpu->hm.s.fForceTLBFlush)
1385 {
1386 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1387 pVCpu->hm.s.fForceTLBFlush = false;
1388 }
1389 else
1390 {
1391 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1392 * not be executed. See hmQueueInvlPage() where it is commented
1393 * out. Support individual entry flushing someday. */
1394 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1395 {
1396 /* We cannot flush individual entries without VPID support. Flush using EPT. */
1397 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1398 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1399 }
1400 else
1401 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1402 }
1403
1404 pVCpu->hm.s.TlbShootdown.cPages = 0;
1405 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1406}
1407
1408
1409/**
1410 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
1411 *
1412 * @returns VBox status code.
1413 * @param pVM Pointer to the VM.
1414 * @param pVCpu Pointer to the VMCPU.
1415 *
1416 * @remarks Called with interrupts disabled.
1417 */
1418static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
1419{
1420 AssertPtr(pVM);
1421 AssertPtr(pVCpu);
1422 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
1423 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
1424
1425 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1426
1427 /*
1428 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
1429 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1430 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1431 */
1432 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1433 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1434 {
1435 pVCpu->hm.s.fForceTLBFlush = true;
1436 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1437 }
1438
1439 /* Check for explicit TLB shootdown flushes. */
1440 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1441 {
1442 /*
1443 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
1444 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
1445 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
1446 */
1447 pVCpu->hm.s.fForceTLBFlush = true;
1448 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1449 }
1450
1451 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1452 if (pVCpu->hm.s.fForceTLBFlush)
1453 {
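        /* Note (summarizing the logic below): ASID 0 is reserved for the host, so guest ASIDs live in the range
           [1, uMaxAsid). On wraparound, every VCPU that subsequently runs on this host CPU must pick up a new
           ASID and flush it before first use (tracked via cTlbFlushes and fFlushAsidBeforeUse). */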
1454 ++pCpu->uCurrentAsid;
1455 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1456 {
1457 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
1458 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1459 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1460 }
1461
1462 pVCpu->hm.s.fForceTLBFlush = false;
1463 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1464 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1465 if (pCpu->fFlushAsidBeforeUse)
1466 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1467 }
1468 else
1469 {
1470 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1471 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1472 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1473 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1474
1475 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1476 * not be executed. See hmQueueInvlPage() where it is commented
1477 * out. Support individual entry flushing someday. */
1478 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1479 {
1480             /* Flush individual guest entries using INVVPID if the CPU supports it, otherwise fall back to a wider VPID flush. */
1481 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1482 {
1483 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1484 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1485 }
1486 else
1487 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1488 }
1489 else
1490 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1491 }
1492
1493 pVCpu->hm.s.TlbShootdown.cPages = 0;
1494 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1495
1496 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1497 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1498 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1499 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1500 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1501 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1502
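    /* Finally, make the ASID chosen above visible to the CPU by writing it into the VPID field of the VMCS;
       this is the tag VT-x associates with the guest's TLB entries. */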
1503 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1504 AssertRC(rc);
1505}
1506
1507
1508/**
1509 * Flushes the guest TLB entries based on CPU capabilities.
1510 *
1511 * @param pVCpu Pointer to the VMCPU.
1512 */
1513DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu)
1514{
1515 PVM pVM = pVCpu->CTX_SUFF(pVM);
1516 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
1517 {
1518 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu); break;
1519 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu); break;
1520 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu); break;
1521 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu); break;
1522 default:
1523 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
1524 break;
1525 }
1526}
1527
1528
1529/**
1530 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
1531 * TLB entries from the host TLB before VM-entry.
1532 *
1533 * @returns VBox status code.
1534 * @param pVM Pointer to the VM.
1535 */
1536static int hmR0VmxSetupTaggedTlb(PVM pVM)
1537{
1538 /*
1539 * Determine optimal flush type for Nested Paging.
1540     * We cannot simply ignore EPT if no suitable flush type is supported by the CPU, as we've already set up
1541     * unrestricted guest execution (see hmR3InitFinalizeR0()).
1542 */
1543 if (pVM->hm.s.fNestedPaging)
1544 {
1545 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
1546 {
1547 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
1548 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
1549 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1550 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
1551 else
1552 {
1553 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
1554 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1555 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1556 }
1557
1558 /* Make sure the write-back cacheable memory type for EPT is supported. */
1559 if (!(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
1560 {
1561 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
1562 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1563 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1564 }
1565 }
1566 else
1567 {
1568 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
1569 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1570 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1571 }
1572 }
1573
1574 /*
1575 * Determine optimal flush type for VPID.
1576 */
1577 if (pVM->hm.s.vmx.fVpid)
1578 {
1579 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
1580 {
1581 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1582 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
1583 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
1584 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
1585 else
1586 {
1587                 /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
1588 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1589 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
1590 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1591 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
1592 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1593 pVM->hm.s.vmx.fVpid = false;
1594 }
1595 }
1596 else
1597 {
1598 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
1599             Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
1600 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1601 pVM->hm.s.vmx.fVpid = false;
1602 }
1603 }
1604
1605 /*
1606 * Setup the handler for flushing tagged-TLBs.
1607 */
1608 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
1609 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
1610 else if (pVM->hm.s.fNestedPaging)
1611 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
1612 else if (pVM->hm.s.vmx.fVpid)
1613 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
1614 else
1615 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
1616 return VINF_SUCCESS;
1617}
1618
1619
1620/**
1621 * Sets up pin-based VM-execution controls in the VMCS.
1622 *
1623 * @returns VBox status code.
1624 * @param pVM Pointer to the VM.
1625 * @param pVCpu Pointer to the VMCPU.
1626 */
1627static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
1628{
1629 AssertPtr(pVM);
1630 AssertPtr(pVCpu);
1631
1632 uint32_t val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; /* Bits set here must always be set. */
1633 uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; /* Bits cleared here must always be cleared. */
1634
1635     val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT           /* External interrupts cause a VM-exit. */
1636            | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;            /* Non-maskable interrupts (NMIs) cause a VM-exit. */
1637 Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI));
1638
1639 /* Enable the VMX preemption timer. */
1640 if (pVM->hm.s.vmx.fUsePreemptTimer)
1641 {
1642 Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
1643 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
1644 }
1645
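    /* Sanity check below: every bit we want set must be permitted by the CPU's allowed-1 mask ('zap'); the
       disallowed-0 bits that 'val' started with are mandatory and always remain set. */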
1646 if ((val & zap) != val)
1647 {
1648 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1649 pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0, val, zap));
1650 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1651 }
1652
1653 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
1654 AssertRCReturn(rc, rc);
1655
1656 /* Update VCPU with the currently set pin-based VM-execution controls. */
1657 pVCpu->hm.s.vmx.u32PinCtls = val;
1658 return rc;
1659}
1660
1661
1662/**
1663 * Sets up processor-based VM-execution controls in the VMCS.
1664 *
1665 * @returns VBox status code.
1666 * @param pVM Pointer to the VM.
1667 * @param pVCpu Pointer to the VMCPU.
1668 */
1669static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
1670{
1671 AssertPtr(pVM);
1672 AssertPtr(pVCpu);
1673
1674 int rc = VERR_INTERNAL_ERROR_5;
1675 uint32_t val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; /* Bits set here must be set in the VMCS. */
1676 uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1677
1678 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
1679 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
1680 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
1681 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
1682 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
1683 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
1684 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
1685
1686     /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; verify that the CPU doesn't force it to be always set or always clear. */
1687 if ( !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
1688 || (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
1689 {
1690 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
1691 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1692 }
1693
1694 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
1695 if (!pVM->hm.s.fNestedPaging)
1696 {
1697 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
1698 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
1699 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
1700 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
1701 }
1702
1703 /* Use TPR shadowing if supported by the CPU. */
1704 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
1705 {
1706 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
1707 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
1708 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
1709 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
1710 AssertRCReturn(rc, rc);
1711
1712 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
1713         /* CR8 writes cause a VM-exit based on the TPR threshold. */
1714 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
1715 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
1716 }
1717 else
1718 {
1719         val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT        /* CR8 reads cause a VM-exit. */
1720                | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;      /* CR8 writes cause a VM-exit. */
1721 }
1722
1723 /* Use MSR-bitmaps if supported by the CPU. */
1724 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1725 {
1726 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
1727
1728 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1729 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
1730 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1731 AssertRCReturn(rc, rc);
1732
1733 /*
1734 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
1735 * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).
1736 */
1737 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1738 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1739 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1740 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1741 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1742 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1743 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1744 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1745 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1746 }
1747
1748 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
1749 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1750 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
1751
1752 if ((val & zap) != val)
1753 {
1754 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1755 pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0, val, zap));
1756 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1757 }
1758
1759 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
1760 AssertRCReturn(rc, rc);
1761
1762 /* Update VCPU with the currently set processor-based VM-execution controls. */
1763 pVCpu->hm.s.vmx.u32ProcCtls = val;
1764
1765 /*
1766 * Secondary processor-based VM-execution controls.
1767 */
1768 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
1769 {
1770 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
1771 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1772
1773 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
1774 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
1775
1776 if (pVM->hm.s.fNestedPaging)
1777 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
1778 else
1779 {
1780 /*
1781 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
1782 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
1783 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
1784 */
1785 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
1786 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
1787 }
1788
1789 if (pVM->hm.s.vmx.fVpid)
1790 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
1791
1792 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1793 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
1794
1795 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
1796 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
1797 * done dynamically. */
1798 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
1799 {
1800 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
1801 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
1802 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
1803 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
1804 AssertRCReturn(rc, rc);
1805 }
1806
1807 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
1808 {
1809 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
1810 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1811 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1812 }
1813
1814 if ((val & zap) != val)
1815 {
1816 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
1817 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0, val, zap));
1818 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1819 }
1820
1821 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
1822 AssertRCReturn(rc, rc);
1823
1824 /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
1825 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
1826 }
1827
1828 return VINF_SUCCESS;
1829}
1830
1831
1832/**
1833 * Sets up miscellaneous (everything other than Pin & Processor-based
1834 * VM-execution) control fields in the VMCS.
1835 *
1836 * @returns VBox status code.
1837 * @param pVM Pointer to the VM.
1838 * @param pVCpu Pointer to the VMCPU.
1839 */
1840static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
1841{
1842 AssertPtr(pVM);
1843 AssertPtr(pVCpu);
1844
1845 int rc = VERR_GENERAL_FAILURE;
1846
1847 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
1848#if 0
1849 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs())*/
1850 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
1851 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
1852
1853 /*
1854 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
1855 * and if the X86_XCPT_PF bit in the exception bitmap is set it causes a VM-exit, if clear doesn't cause an exit.
1856 * We thus use the exception bitmap to control it rather than use both.
1857 */
1858 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
1859 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
1860
1861 /** @todo Explore possibility of using IO-bitmaps. */
1862 /* All IO & IOIO instructions cause VM-exits. */
1863 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
1864 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
1865
1866 /* Initialize the MSR-bitmap area. */
1867 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1868 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
1869 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1870#endif
1871
1872#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
1873 /* Setup MSR autoloading/storing. */
1874 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
1875 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
1876 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1877 AssertRCReturn(rc, rc);
1878 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1879 AssertRCReturn(rc, rc);
1880
1881 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
1882 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
1883 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
1884 AssertRCReturn(rc, rc);
1885#endif
1886
1887 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
1888 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
1889 AssertRCReturn(rc, rc);
1890
1891 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
1892#if 0
1893 /* Setup debug controls */
1894 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
1895 AssertRCReturn(rc, rc);
1896 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
1897 AssertRCReturn(rc, rc);
1898#endif
1899
1900 return rc;
1901}
1902
1903
1904/**
1905 * Sets up the initial exception bitmap in the VMCS based on static conditions
1906 * (i.e. conditions that cannot ever change at runtime).
1907 *
1908 * @returns VBox status code.
1909 * @param pVM Pointer to the VM.
1910 * @param pVCpu Pointer to the VMCPU.
1911 */
1912static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
1913{
1914 AssertPtr(pVM);
1915 AssertPtr(pVCpu);
1916
1917 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
1918
1919 uint32_t u32XcptBitmap = 0;
1920
1921 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
1922 if (!pVM->hm.s.fNestedPaging)
1923 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
1924
1925 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
1926 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
1927 AssertRCReturn(rc, rc);
1928 return rc;
1929}
1930
1931
1932/**
1933 * Sets up the initial guest-state mask. The guest-state mask is consulted
1934 * before reading guest-state fields from the VMCS, since VMREADs can be expensive
1935 * in the nested-virtualization case (each one would cause a VM-exit).
1936 *
1937 * @param pVCpu Pointer to the VMCPU.
1938 */
1939static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
1940{
1941 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
1942 pVCpu->hm.s.vmx.fUpdatedGuestState = HMVMX_UPDATED_GUEST_ALL;
1943 return VINF_SUCCESS;
1944}
1945
1946
1947/**
1948 * Does per-VM VT-x initialization.
1949 *
1950 * @returns VBox status code.
1951 * @param pVM Pointer to the VM.
1952 */
1953VMMR0DECL(int) VMXR0InitVM(PVM pVM)
1954{
1955 LogFlowFunc(("pVM=%p\n", pVM));
1956
1957 int rc = hmR0VmxStructsAlloc(pVM);
1958 if (RT_FAILURE(rc))
1959 {
1960 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
1961 return rc;
1962 }
1963
1964 return VINF_SUCCESS;
1965}
1966
1967
1968/**
1969 * Does per-VM VT-x termination.
1970 *
1971 * @returns VBox status code.
1972 * @param pVM Pointer to the VM.
1973 */
1974VMMR0DECL(int) VMXR0TermVM(PVM pVM)
1975{
1976 LogFlowFunc(("pVM=%p\n", pVM));
1977
1978#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1979 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
1980 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
1981#endif
1982 hmR0VmxStructsFree(pVM);
1983 return VINF_SUCCESS;
1984}
1985
1986
1987/**
1988 * Sets up the VM for execution under VT-x.
1989 * This function is only called once per VM during initialization.
1990 *
1991 * @returns VBox status code.
1992 * @param pVM Pointer to the VM.
1993 */
1994VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
1995{
1996 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
1997 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1998
1999 LogFlowFunc(("pVM=%p\n", pVM));
2000
2001 /*
2002 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2003 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
2004 */
2005 /* -XXX- change hmR3InitFinalizeR0Intel() to fail if pRealModeTSS alloc fails. */
2006 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2007 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2008 || !pVM->hm.s.vmx.pRealModeTSS))
2009 {
2010 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
2011 return VERR_INTERNAL_ERROR;
2012 }
2013
2014 /* Initialize these always, see hmR3InitFinalizeR0().*/
2015 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NONE;
2016 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
2017
2018 /* Setup the tagged-TLB flush handlers. */
2019 int rc = hmR0VmxSetupTaggedTlb(pVM);
2020 if (RT_FAILURE(rc))
2021 {
2022 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2023 return rc;
2024 }
2025
2026 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2027 {
2028 PVMCPU pVCpu = &pVM->aCpus[i];
2029 AssertPtr(pVCpu);
2030 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2031
2032 /* Set revision dword at the beginning of the VMCS structure. */
2033 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
2034
2035 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2036 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2037 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2038 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2039
2040 /* Load this VMCS as the current VMCS. */
2041 rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2042 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2043 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2044
2045 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2046 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2047 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2048
2049 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2050 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2051 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2052
2053 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2054 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2055 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2056
2057 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2058 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2059 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2060
2061 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2062 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2063 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2064
2065#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2066 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2067 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2068 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2069#endif
2070
2071 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2072 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2073 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2074 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2075
2076 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2077 }
2078
2079 return VINF_SUCCESS;
2080}
2081
2082
2083/**
2084 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2085 * the VMCS.
2086 *
2087 * @returns VBox status code.
2088 * @param pVM Pointer to the VM.
2089 * @param pVCpu Pointer to the VMCPU.
2090 */
2091DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2092{
2093 RTCCUINTREG uReg = ASMGetCR0();
2094 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2095 AssertRCReturn(rc, rc);
2096
2097#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2098 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2099 if (HMVMX_IS_64BIT_HOST_MODE())
2100 {
2101 uint64_t uRegCR3 = HMR0Get64bitCR3();
2102 rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2103 }
2104 else
2105#endif
2106 {
2107 uReg = ASMGetCR3();
2108 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2109 }
2110 AssertRCReturn(rc, rc);
2111
2112 uReg = ASMGetCR4();
2113 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2114 AssertRCReturn(rc, rc);
2115 return rc;
2116}
2117
2118
2119/**
2120 * Saves the host segment registers and GDTR, IDTR (as well as the TR, GS and FS bases) into
2121 * the host-state area in the VMCS.
2122 *
2123 * @returns VBox status code.
2124 * @param pVM Pointer to the VM.
2125 * @param pVCpu Pointer to the VMCPU.
2126 */
2127DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2128{
2129 int rc = VERR_INTERNAL_ERROR_5;
2130 RTSEL uSelDS = 0;
2131 RTSEL uSelES = 0;
2132 RTSEL uSelFS = 0;
2133 RTSEL uSelGS = 0;
2134 RTSEL uSelTR = 0;
2135
2136 /*
2137 * Host DS, ES, FS and GS segment registers.
2138 */
2139#if HC_ARCH_BITS == 64
2140 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2141 uSelDS = ASMGetDS();
2142 uSelES = ASMGetES();
2143 uSelFS = ASMGetFS();
2144 uSelGS = ASMGetGS();
2145#endif
2146
2147 /*
2148 * Host CS and SS segment registers.
2149 */
2150 RTSEL uSelCS;
2151 RTSEL uSelSS;
2152#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2153 if (HMVMX_IS_64BIT_HOST_MODE())
2154 {
2155 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2156 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2157 }
2158 else
2159 {
2160 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2161 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2162 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2163 }
2164#else
2165 uSelCS = ASMGetCS();
2166 uSelSS = ASMGetSS();
2167#endif
2168
2169 /*
2170 * Host TR segment register.
2171 */
2172 uSelTR = ASMGetTR();
2173
2174#if HC_ARCH_BITS == 64
2175 /*
2176     * Determine if the host segment registers are suitable for VT-x; otherwise load zero so that VM-entry succeeds, and restore them
2177 * before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2178 */
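    /* Illustrative example (hypothetical selector value): a host DS of 0x2b has RPL=3, so the DS check below
       fires; DS is then loaded as 0 for VM-entry and the original value is restored later via
       fRestoreHostFlags / RestoreHost. */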
2179 if (uSelDS & (X86_SEL_RPL | X86_SEL_LDT))
2180 {
2181 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_DS;
2182 pVCpu->hm.s.vmx.RestoreHost.uHostSelDS = uSelDS;
2183 uSelDS = 0;
2184 }
2185 if (uSelES & (X86_SEL_RPL | X86_SEL_LDT))
2186 {
2187 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_ES;
2188 pVCpu->hm.s.vmx.RestoreHost.uHostSelES = uSelES;
2189 uSelES = 0;
2190 }
2191 if (uSelFS & (X86_SEL_RPL | X86_SEL_LDT))
2192 {
2193 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_FS;
2194 pVCpu->hm.s.vmx.RestoreHost.uHostSelFS = uSelFS;
2195 uSelFS = 0;
2196 }
2197 if (uSelGS & (X86_SEL_RPL | X86_SEL_LDT))
2198 {
2199 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_GS;
2200 pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS;
2201 uSelGS = 0;
2202 }
2203#endif
2204
2205 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2206 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2207 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2208 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2209 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2210 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2211 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2212 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2213 Assert(uSelCS);
2214 Assert(uSelTR);
2215
2216     /* The assertion below is correct, but u32ExitCtls would not have been updated yet at this point; hence it's disabled. */
2217#if 0
2218 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2219 Assert(uSelSS != 0);
2220#endif
2221
2222 /* Write these host selector fields into the host-state area in the VMCS. */
2223 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
2224 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
2225#if HC_ARCH_BITS == 64
2226 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
2227 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
2228 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
2229 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
2230#endif
2231 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
2232
2233 /*
2234 * Host GDTR and IDTR.
2235 */
2236 RTGDTR Gdtr;
2237 RT_ZERO(Gdtr);
2238#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2239 if (HMVMX_IS_64BIT_HOST_MODE())
2240 {
2241 X86XDTR64 Gdtr64;
2242 X86XDTR64 Idtr64;
2243 HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
2244 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr); AssertRCReturn(rc, rc);
2245 rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr); AssertRCReturn(rc, rc);
2246
2247 Gdtr.cbGdt = Gdtr64.cb;
2248 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
2249 }
2250 else
2251#endif
2252 {
2253 RTIDTR Idtr;
2254 ASMGetGDTR(&Gdtr);
2255 ASMGetIDTR(&Idtr);
2256 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
2257 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
2258
2259#if HC_ARCH_BITS == 64
2260 /*
2261         * Determine if we need to manually restore the GDTR and IDTR limits, as VT-x zaps them to the
2262 * maximum limit (0xffff) on every VM-exit.
2263 */
2264 if (Gdtr.cbGdt != 0xffff)
2265 {
2266 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
2267 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
2268 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
2269 }
2270
2271 /*
2272         * The IDT limit is effectively at most 0xfff (256 gates of 16 bytes each). Therefore, if the host's limit is already
2273         * 0xfff, VT-x bloating it to 0xffff is not a problem as the extra entries cannot be reached anyway. See Intel
2274         * spec. 6.14.1 "64-Bit Mode IDT" and Intel spec. 6.2 "Exception and Interrupt Vectors".
2275 */
2276 if (Idtr.cbIdt < 0x0fff)
2277 {
2278 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
2279 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
2280 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
2281 }
2282#endif
2283 }
2284
2285 /*
2286     * Host TR base. Verify that the TR selector doesn't point past the GDT. Masking off the TI and RPL bits (the low 3 bits)
2287     * yields the descriptor's byte offset, which is what the CPU's "scale index by 8" effectively computes. TI is always 0 and RPL should be too in most cases.
2288 */
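    /* For instance (hypothetical value), TR = 0x0040 (TI=0, RPL=0) masks to byte offset 0x40 into the GDT,
       i.e. descriptor entry 8. */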
2289 if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
2290 {
2291 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
2292 return VERR_VMX_INVALID_HOST_STATE;
2293 }
2294
2295 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
2296#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2297 if (HMVMX_IS_64BIT_HOST_MODE())
2298 {
2299 /* We need the 64-bit TR base for hybrid darwin. */
2300 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
2301 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
2302 }
2303 else
2304#endif
2305 {
2306 uintptr_t uTRBase;
2307#if HC_ARCH_BITS == 64
2308 uTRBase = X86DESC64_BASE(pDesc);
2309#else
2310 uTRBase = X86DESC_BASE(pDesc);
2311#endif
2312 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
2313 }
2314 AssertRCReturn(rc, rc);
2315
2316 /*
2317 * Host FS base and GS base.
2318 */
2319#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2320 if (HMVMX_IS_64BIT_HOST_MODE())
2321 {
2322 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
2323 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
2324 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); AssertRCReturn(rc, rc);
2325 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); AssertRCReturn(rc, rc);
2326
2327# if HC_ARCH_BITS == 64
2328 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
2329 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
2330 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
2331 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
2332 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
2333# endif
2334 }
2335#endif
2336 return rc;
2337}
2338
2339
2340/**
2341 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
2342 * host-state area of the VMCS. These MSRs will be automatically restored on
2343 * the host after every successful VM-exit.
2344 *
2345 * @returns VBox status code.
2346 * @param pVM Pointer to the VM.
2347 * @param pVCpu Pointer to the VMCPU.
2348 */
2349DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
2350{
2351 AssertPtr(pVCpu);
2352 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
2353
2354 int rc = VINF_SUCCESS;
2355#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2356 PVMXMSR pHostMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
2357 uint32_t cHostMsrs = 0;
2358 uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
2359
2360 if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2361 {
2362 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
2363
2364# if HC_ARCH_BITS == 64
2365 /* Paranoia. 64-bit code requires these bits to be set always. */
2366 Assert((u64HostEfer & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
2367
2368 /*
2369 * We currently do not save/restore host EFER, we just make sure it doesn't get modified by VT-x operation.
2370 * All guest accesses (read, write) on EFER cause VM-exits. If we are to conditionally load the guest EFER for
2371 * some reason (e.g. allow transparent reads) we would activate the code below.
2372 */
2373# if 0
2374 /* All our supported 64-bit host platforms must have NXE bit set. Otherwise we can change the below code to save EFER. */
2375 Assert(u64HostEfer & (MSR_K6_EFER_NXE));
2376 /* The SCE bit is only applicable in 64-bit mode. Save EFER if it doesn't match what the guest has.
2377 See Intel spec. 30.10.4.3 "Handling the SYSCALL and SYSRET Instructions". */
2378 if (CPUMIsGuestInLongMode(pVCpu))
2379 {
2380 uint64_t u64GuestEfer;
2381 rc = CPUMQueryGuestMsr(pVCpu, MSR_K6_EFER, &u64GuestEfer);
2382 AssertRC(rc);
2383
2384 if ((u64HostEfer & MSR_K6_EFER_SCE) != (u64GuestEfer & MSR_K6_EFER_SCE))
2385 {
2386 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2387 pHostMsr->u32Reserved = 0;
2388 pHostMsr->u64Value = u64HostEfer;
2389 pHostMsr++; cHostMsrs++;
2390 }
2391 }
2392# endif
2393# else /* HC_ARCH_BITS != 64 */
2394 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2395 pHostMsr->u32Reserved = 0;
2396# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2397 if (CPUMIsGuestInLongMode(pVCpu))
2398 {
2399             /* Must match the EFER value in our 64-bit switcher. */
2400 pHostMsr->u64Value = u64HostEfer | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
2401 }
2402 else
2403# endif
2404 pHostMsr->u64Value = u64HostEfer;
2405 pHostMsr++; cHostMsrs++;
2406# endif /* HC_ARCH_BITS == 64 */
2407 }
2408
2409# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2410 if (HMVMX_IS_64BIT_HOST_MODE())
2411 {
2412 pHostMsr->u32IndexMSR = MSR_K6_STAR;
2413 pHostMsr->u32Reserved = 0;
2414 pHostMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
2415 pHostMsr++; cHostMsrs++;
2416 pHostMsr->u32IndexMSR = MSR_K8_LSTAR;
2417 pHostMsr->u32Reserved = 0;
2418 pHostMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64-bit mode syscall rip */
2419 pHostMsr++; cHostMsrs++;
2420 pHostMsr->u32IndexMSR = MSR_K8_SF_MASK;
2421 pHostMsr->u32Reserved = 0;
2422 pHostMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
2423 pHostMsr++; cHostMsrs++;
2424 pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
2425 pHostMsr->u32Reserved = 0;
2426 pHostMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
2427 pHostMsr++; cHostMsrs++;
2428 }
2429# endif
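    /* Each entry appended above is a VMXMSR record (MSR index, reserved dword, 64-bit value); the count written
       below tells the CPU how many such records to load on every VM-exit. */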
2430
2431     /* Shouldn't ever happen, but the CPU does advertise a limit; we're well within the recommended 512. */
2432 if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
2433 {
2434 LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
2435 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2436 }
2437
2438 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
2439#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
2440
2441 /*
2442 * Host Sysenter MSRs.
2443 */
2444 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
2445 AssertRCReturn(rc, rc);
2446#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2447 if (HMVMX_IS_64BIT_HOST_MODE())
2448 {
2449 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2450 AssertRCReturn(rc, rc);
2451 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2452 }
2453 else
2454 {
2455 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2456 AssertRCReturn(rc, rc);
2457 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2458 }
2459#elif HC_ARCH_BITS == 32
2460 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2461 AssertRCReturn(rc, rc);
2462 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2463#else
2464 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2465 AssertRCReturn(rc, rc);
2466 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2467#endif
2468 AssertRCReturn(rc, rc);
2469
2470 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
2471 * hmR0VmxSetupExitCtls() !! */
2472 return rc;
2473}
2474
2475
2476/**
2477 * Sets up VM-entry controls in the VMCS. These controls can affect things done
2478 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
2479 * controls".
2480 *
2481 * @returns VBox status code.
2482 * @param pVCpu Pointer to the VMCPU.
2483 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2484 * out-of-sync. Make sure to update the required fields
2485 * before using them.
2486 *
2487 * @remarks No-long-jump zone!!!
2488 */
2489DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2490{
2491 int rc = VINF_SUCCESS;
2492 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
2493 {
2494 PVM pVM = pVCpu->CTX_SUFF(pVM);
2495 uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; /* Bits set here must be set in the VMCS. */
2496 uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2497
2498         /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
2499 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
2500
2501 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
2502 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2503 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
2504 else
2505 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
2506
2507 /*
2508 * The following should not be set (since we're not in SMM mode):
2509 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
2510 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
2511 */
2512
2513 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
2514 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR,
2515 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR */
2516
2517 if ((val & zap) != val)
2518 {
2519 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2520 pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0, val, zap));
2521 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2522 }
2523
2524 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
2525 AssertRCReturn(rc, rc);
2526
2527         /* Update VCPU with the currently set VM-entry controls. */
2528 pVCpu->hm.s.vmx.u32EntryCtls = val;
2529 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
2530 }
2531 return rc;
2532}
2533
2534
2535/**
2536 * Sets up the VM-exit controls in the VMCS.
2537 *
2538 * @returns VBox status code.
2540 * @param pVCpu Pointer to the VMCPU.
2541 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2542 * out-of-sync. Make sure to update the required fields
2543 * before using them.
2544 *
2545 * @remarks requires EFER.
2546 */
2547DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2548{
2549 int rc = VINF_SUCCESS;
2550 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
2551 {
2552 PVM pVM = pVCpu->CTX_SUFF(pVM);
2553 uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; /* Bits set here must be set in the VMCS. */
2554 uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2555
2556 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
2557 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
2558
2559 /*
2560 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
2561          * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bits to this value. See assertion in hmR0VmxSaveHostMsrs().
2562 */
2563#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2564 if (HMVMX_IS_64BIT_HOST_MODE())
2565 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
2566 else
2567 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2568#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
2569 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2570 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE; /* The switcher goes to long mode. */
2571 else
2572 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2573#endif
2574
2575 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
2576 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
2577
2578 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
2579 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
2580 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR,
2581 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR,
2582 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */
2583
2584 if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
2585 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
2586
2587 if ((val & zap) != val)
2588 {
2589             LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2590 pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0, val, zap));
2591 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2592 }
2593
2594 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
2595 AssertRCReturn(rc, rc);
2596
2597 /* Update VCPU with the currently set VM-exit controls. */
2598 pVCpu->hm.s.vmx.u32ExitCtls = val;
2599 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
2600 }
2601 return rc;
2602}
2603
2604
2605/**
2606 * Loads the guest APIC and related state.
2607 *
2608 * @returns VBox status code.
2610 * @param pVCpu Pointer to the VMCPU.
2611 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2612 * out-of-sync. Make sure to update the required fields
2613 * before using them.
2614 */
2615DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2616{
2617 int rc = VINF_SUCCESS;
2618 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
2619 {
2620 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
2621 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2622 {
2623 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2624
2625 bool fPendingIntr = false;
2626 uint8_t u8Tpr = 0;
2627 uint8_t u8PendingIntr = 0;
2628 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
2629 AssertRCReturn(rc, rc);
2630
2631 /*
2632 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
2633 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
2634 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
2635 * the interrupt when we VM-exit for other reasons.
2636 */
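            /* Illustrative example (hypothetical values): guest TPR = 0x30 (priority class 3) with a pending
               vector 0x29 (class 2): the interrupt is masked by the TPR, the threshold computed below becomes 2,
               and the CPU VM-exits as soon as the guest lowers its TPR below 0x20 so we can deliver it. */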
2637 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
2638 uint32_t u32TprThreshold = 0;
2639 if (fPendingIntr)
2640 {
2641 /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
2642 const uint8_t u8PendingPriority = (u8PendingIntr >> 4);
2643 const uint8_t u8TprPriority = (u8Tpr >> 4) & 7;
2644 if (u8PendingPriority <= u8TprPriority)
2645 u32TprThreshold = u8PendingPriority;
2646 else
2647 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
2648 }
2649 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
2650
2651 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
2652 AssertRCReturn(rc, rc);
2653
2654         /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
2655 if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
2656 {
2657 Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* EFER always up-to-date. */
2658 pMixedCtx->msrLSTAR = u8Tpr;
2659 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
2660 {
2661 /* If there are interrupts pending, intercept CR8 writes, otherwise don't intercept CR8 reads or writes. */
2662 if (fPendingIntr)
2663 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
2664 else
2665 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2666 }
2667 }
2668 }
2669
2670 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
2671 }
2672 return rc;
2673}
2674
2675
2676/**
2677 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
2678 *
2679 * @returns Guest's interruptibility-state.
2680 * @param pVCpu Pointer to the VMCPU.
2681 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2682 * out-of-sync. Make sure to update the required fields
2683 * before using them.
2684 *
2685 * @remarks No-long-jump zone!!!
2686 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
2687 */
2688DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2689{
2690 /*
2691 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
2692 * inhibit interrupts or clear any existing interrupt-inhibition.
2693 */
2694 uint32_t uIntrState = 0;
2695 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2696 {
2697 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
2698 AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS))
2699 == (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
2700 if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2701 {
2702 /*
2703 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
2704 * VT-x, the flag's condition to be cleared is met and thus the cleared state is correct.
2705 */
2706 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2707 }
2708 else if (pMixedCtx->eflags.Bits.u1IF)
2709 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
2710 else
2711 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
2712 }
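    /* Note: only a single inhibit force-flag is tracked, so the cause is inferred above: with IF set the shadow
       can only have come from STI (which would have set IF), otherwise it is attributed to MOV SS/POP SS. */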
2713 return uIntrState;
2714}
2715
2716
2717/**
2718 * Loads the guest's interruptibility-state into the guest-state area in the
2719 * VMCS.
2720 *
2721 * @returns VBox status code.
2722 * @param pVCpu Pointer to the VMCPU.
2723 * @param uIntrState The interruptibility-state to set.
2724 */
2725static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
2726{
2727 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
2728 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
2729 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
2730 AssertRCReturn(rc, rc);
2731 return rc;
2732}
2733
2734
2735/**
2736 * Loads the guest's RIP into the guest-state area in the VMCS.
2737 *
2738 * @returns VBox status code.
2739 * @param pVCpu Pointer to the VMCPU.
2740 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2741 * out-of-sync. Make sure to update the required fields
2742 * before using them.
2743 *
2744 * @remarks No-long-jump zone!!!
2745 */
2746static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2747{
2748 int rc = VINF_SUCCESS;
2749 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
2750 {
2751 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
2752 AssertRCReturn(rc, rc);
2753 Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
2754 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
2755 }
2756 return rc;
2757}
2758
2759
2760/**
2761 * Loads the guest's RSP into the guest-state area in the VMCS.
2762 *
2763 * @returns VBox status code.
2764 * @param pVCpu Pointer to the VMCPU.
2765 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2766 * out-of-sync. Make sure to update the required fields
2767 * before using them.
2768 *
2769 * @remarks No-long-jump zone!!!
2770 */
2771static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2772{
2773 int rc = VINF_SUCCESS;
2774 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
2775 {
2776 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
2777 AssertRCReturn(rc, rc);
2778 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
2779 }
2780 return rc;
2781}
2782
2783
2784/**
2785 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
2786 *
2787 * @returns VBox status code.
2788 * @param pVCpu Pointer to the VMCPU.
2789 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2790 * out-of-sync. Make sure to update the required fields
2791 * before using them.
2792 *
2793 * @remarks No-long-jump zone!!!
2794 */
2795static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2796{
2797 int rc = VINF_SUCCESS;
2798 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
2799 {
2800 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
2801 Let us assert it as such and use 32-bit VMWRITE. */
2802 Assert(!(pMixedCtx->rflags.u64 >> 32));
2803 X86EFLAGS uEFlags = pMixedCtx->eflags;
2804 uEFlags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
2805 uEFlags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
2806
2807 /*
2808 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
2809 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
2810 */
2811 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2812 {
2813 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2814 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2815 pVCpu->hm.s.vmx.RealMode.eflags.u32 = uEFlags.u32; /* Save the original eflags of the real-mode guest. */
2816 uEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
2817 uEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
2818 }
2819
2820 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, uEFlags.u32);
2821 AssertRCReturn(rc, rc);
2822
2823 Log4(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", uEFlags.u32));
2824 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
2825 }
2826 return rc;
2827}
2828
2829
2830/**
2831 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
2832 *
2833 * @returns VBox status code.
2834 * @param pVCpu Pointer to the VMCPU.
2835 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2836 * out-of-sync. Make sure to update the required fields
2837 * before using them.
2838 *
2839 * @remarks No-long-jump zone!!!
2840 */
2841DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2842{
2843 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
2844 AssertRCReturn(rc, rc);
2845 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
2846 AssertRCReturn(rc, rc);
2847 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
2848 AssertRCReturn(rc, rc);
2849 return rc;
2850}
2851
2852
2853/**
2854 * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
2855 * in the VMCS.
2856 *
2857 * @returns VBox status code.
2858 * @param pVCpu Pointer to the VMCPU.
2859 * @param pCtx Pointer to the guest-CPU context. The data may be
2861 * out-of-sync. Make sure to update the required fields
2862 * before using them.
2863 *
2864 * @remarks No-long-jump zone!!!
2865 */
2866static int hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
2867{
2868 int rc = VINF_SUCCESS;
2869 PVM pVM = pVCpu->CTX_SUFF(pVM);
2870
2871 /*
2872 * Guest CR0.
2873 * Guest FPU.
2874 */
2875 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
2876 {
2877 Assert(!(pCtx->cr0 >> 32));
2878 uint32_t u32GuestCR0 = pCtx->cr0;
2879
2880 /* The guest's view (read access) of its CR0 is unblemished. */
2881 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
2882 AssertRCReturn(rc, rc);
2883 Log4(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
2884
2885 /* Setup VT-x's view of the guest CR0. */
2886 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
2887 if (pVM->hm.s.fNestedPaging)
2888 {
2889 if (CPUMIsGuestPagingEnabledEx(pCtx))
2890 {
2891 /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
2892 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2893 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
2894 }
2895 else
2896 {
2897                /* The guest doesn't have paging enabled; make CR3 accesses cause VM-exits so we can update our shadow page tables. */
2898 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2899 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2900 }
2901
2902 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2903 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2904 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2905
2906 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
2907 AssertRCReturn(rc, rc);
2908 }
2909 else
2910 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2911
2912 /*
2913 * Guest FPU bits.
2914         * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
2915         * CPUs to support VT-x; it makes no mention of this with regard to UX in the VM-entry checks.
2916 */
2917 u32GuestCR0 |= X86_CR0_NE;
2918 bool fInterceptNM = false;
2919 if (CPUMIsGuestFPUStateActive(pVCpu))
2920 {
2921 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
2922             /* The guest should still get #NM exceptions when it expects them, so we should not clear the TS & MP bits here.
2923 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
2924 }
2925 else
2926 {
2927 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
2928 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
2929 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
2930 }
2931
2932 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
2933 bool fInterceptMF = false;
2934 if (!(pCtx->cr0 & X86_CR0_NE))
2935 fInterceptMF = true;
2936
2937 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
2938 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2939 {
2940 Assert(PDMVmmDevHeapIsEnabled(pVM));
2941 Assert(pVM->hm.s.vmx.pRealModeTSS);
2942 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2943 fInterceptNM = true;
2944 fInterceptMF = true;
2945 }
2946 else
2947 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2948
2949 if (fInterceptNM)
2950 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
2951 else
2952 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
2953
2954 if (fInterceptMF)
2955 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
2956 else
2957 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
2958
2959 /* Additional intercepts for debugging, define these yourself explicitly. */
2960#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2961 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_BP)
2962 | RT_BIT(X86_XCPT_DB)
2963 | RT_BIT(X86_XCPT_DE)
2964 | RT_BIT(X86_XCPT_NM)
2965 | RT_BIT(X86_XCPT_UD)
2966 | RT_BIT(X86_XCPT_NP)
2967 | RT_BIT(X86_XCPT_SS)
2968 | RT_BIT(X86_XCPT_GP)
2969 | RT_BIT(X86_XCPT_PF)
2970 | RT_BIT(X86_XCPT_MF);
2971#elif defined(HMVMX_ALWAYS_TRAP_PF)
2972 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2973#endif
2974
2975 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
2976
2977 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
2978 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
2979 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
2980 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
2981 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
2982 else
2983 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2984
2985 u32GuestCR0 |= uSetCR0;
2986 u32GuestCR0 &= uZapCR0;
2987 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
2988
2989 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
2990 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
2991 AssertRCReturn(rc, rc);
2992 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
2993 AssertRCReturn(rc, rc);
2994 Log4(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
2995
2996 /*
2997 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
2998 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
2999 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3000 */
3001 uint32_t u32CR0Mask = 0;
3002 u32CR0Mask = X86_CR0_PE
3003 | X86_CR0_NE
3004 | X86_CR0_WP
3005 | X86_CR0_PG
3006 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3007 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3008 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3009 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3010 u32CR0Mask &= ~X86_CR0_PE;
3011 if (pVM->hm.s.fNestedPaging)
3012 u32CR0Mask &= ~X86_CR0_WP;
3013
3014 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
3015 if (fInterceptNM)
3016 u32CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
3017 else
3018 u32CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
3019
3020 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3021 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3022 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3023 AssertRCReturn(rc, rc);
3024
3025 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
3026 }
3027
3028 /*
3029 * Guest CR2.
3030 * It's always loaded in the assembler code. Nothing to do here.
3031 */
3032
3033 /*
3034 * Guest CR3.
3035 */
3036 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
3037 {
3038 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3039 if (pVM->hm.s.fNestedPaging)
3040 {
3041 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3042
3043 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3044 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3045 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3046 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3047
3048 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3049 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3050 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
3051
3052 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3053 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3054 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
3055 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3056
3057 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3058 AssertRCReturn(rc, rc);
3059 Log4(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3060
3061 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3062 || CPUMIsGuestPagingEnabledEx(pCtx))
3063 {
3064 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3065 if (CPUMIsGuestInPAEModeEx(pCtx))
3066 {
3067 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3068 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3069 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3070 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3071 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
3072 }
3073
3074                 /* With Nested Paging the guest's view of its CR3 is unblemished: either the guest is using paging, or we
3075                    have Unrestricted Execution to handle the guest while it's not using paging. */
3076 GCPhysGuestCR3 = pCtx->cr3;
3077 }
3078 else
3079 {
3080 /*
3081 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3082 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3083 * EPT takes care of translating it to host-physical addresses.
3084 */
3085 RTGCPHYS GCPhys;
3086 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3087 Assert(PDMVmmDevHeapIsEnabled(pVM));
3088
3089 /* We obtain it here every time as the guest could have relocated this PCI region. */
3090 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3091 AssertRCReturn(rc, rc);
3092
3093 GCPhysGuestCR3 = GCPhys;
3094 }
3095
3096 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", GCPhysGuestCR3));
3097 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3098 }
3099 else
3100 {
3101 /* Non-nested paging case, just use the hypervisor's CR3. */
3102 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3103
3104 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", HCPhysGuestCR3));
3105 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3106 }
3107 AssertRCReturn(rc, rc);
3108
3109 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
3110 }
3111
3112 /*
3113 * Guest CR4.
3114 */
3115 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
3116 {
3117 Assert(!(pCtx->cr4 >> 32));
3118 uint32_t u32GuestCR4 = pCtx->cr4;
3119
3120 /* The guest's view of its CR4 is unblemished. */
3121 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
3122 AssertRCReturn(rc, rc);
3123 Log4(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
3124
3125 /* Setup VT-x's view of the guest CR4. */
3126 /*
3127 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
3128 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
3129 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3130 */
3131 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3132 {
3133 Assert(pVM->hm.s.vmx.pRealModeTSS);
3134 Assert(PDMVmmDevHeapIsEnabled(pVM));
3135 u32GuestCR4 &= ~X86_CR4_VME;
3136 }
3137
3138 if (pVM->hm.s.fNestedPaging)
3139 {
3140 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
3141 && !pVM->hm.s.vmx.fUnrestrictedGuest)
3142 {
3143 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
3144 u32GuestCR4 |= X86_CR4_PSE;
3145                 /* Our identity mapping is a 32-bit page directory. */
3146 u32GuestCR4 &= ~X86_CR4_PAE;
3147 }
3148 /* else use guest CR4.*/
3149 }
3150 else
3151 {
3152 /*
3153              * The shadow paging mode and the guest paging mode can differ; the shadow follows the host paging mode,
3154              * so we need to adjust VT-x's view of CR4 according to our shadow page tables.
3155 */
3156 switch (pVCpu->hm.s.enmShadowMode)
3157 {
3158 case PGMMODE_REAL: /* Real-mode. */
3159 case PGMMODE_PROTECTED: /* Protected mode without paging. */
3160 case PGMMODE_32_BIT: /* 32-bit paging. */
3161 {
3162 u32GuestCR4 &= ~X86_CR4_PAE;
3163 break;
3164 }
3165
3166 case PGMMODE_PAE: /* PAE paging. */
3167 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3168 {
3169 u32GuestCR4 |= X86_CR4_PAE;
3170 break;
3171 }
3172
3173 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
3174 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
3175#ifdef VBOX_ENABLE_64_BITS_GUESTS
3176 break;
3177#endif
3178 default:
3179 AssertFailed();
3180 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3181 }
3182 }
3183
3184 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
3185 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3186 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3187 u32GuestCR4 |= uSetCR4;
3188 u32GuestCR4 &= uZapCR4;
3189
3190 /* Write VT-x's view of the guest CR4 into the VMCS. */
3191 Log4(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
3192 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
3193 AssertRCReturn(rc, rc);
3194
3195 /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM exit. */
3196 uint32_t u32CR4Mask = 0;
3197 u32CR4Mask = X86_CR4_VME
3198 | X86_CR4_PAE
3199 | X86_CR4_PGE
3200 | X86_CR4_PSE
3201 | X86_CR4_VMXE;
3202 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
3203 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
3204 AssertRCReturn(rc, rc);
3205
3206 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
3207 }
3208 return rc;
3209}
3210
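/*
 * Editor's illustrative sketches (not part of the original file; names are hypothetical) of two
 * bit-composition steps performed in hmR0VmxLoadGuestControlRegs() above.
 *
 * 1) Applying the VMX fixed-bit MSRs: a CR0/CR4 bit must be 1 if it is 1 in FIXED0 and may only
 *    be 1 if it is 1 in FIXED1; hence "set" = FIXED0 & FIXED1 and "zap" = FIXED0 | FIXED1.
 */
static uint32_t hmR0VmxSketchApplyFixedBits(uint32_t uGuestCrX, uint64_t uFixed0Msr, uint64_t uFixed1Msr)
{
    uint32_t const uSet = (uint32_t)(uFixed0Msr & uFixed1Msr);  /* Bits that must be 1. */
    uint32_t const uZap = (uint32_t)(uFixed0Msr | uFixed1Msr);  /* Bits that are allowed to be 1. */
    return (uGuestCrX | uSet) & uZap;
}

/*
 * 2) Composing the EPT pointer written to VMX_VMCS64_CTRL_EPTP_FULL: the 4K-aligned physical
 *    address of the EPT PML4 table combined with the memory type and (page-walk length - 1).
 */
static uint64_t hmR0VmxSketchMakeEptp(uint64_t HCPhysEptPml4)
{
    return HCPhysEptPml4
         | VMX_EPT_MEMTYPE_WB
         | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
}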
3211
3212/**
3213 * Loads the guest debug registers into the guest-state area in the VMCS.
3214 * This also sets up whether #DB and MOV DRx accesses cause VM exits.
3215 *
3216 * @returns VBox status code.
3217 * @param pVCpu Pointer to the VMCPU.
3218 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3219 * out-of-sync. Make sure to update the required fields
3220 * before using them.
3221 *
3222 * @remarks No-long-jump zone!!!
3223 */
3224static int hmR0VmxLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3225{
3226 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
3227 return VINF_SUCCESS;
3228
3229#ifdef VBOX_STRICT
3230 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
3231 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
3232 {
3233 Assert(!(pMixedCtx->dr[7] >> 32)); /* upper 32 bits are reserved (MBZ). */
3234 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
3235 Assert((pMixedCtx->dr[7] & 0xd800) == 0); /* bits 15, 14, 12, 11 are reserved (MBZ). */
3236 Assert((pMixedCtx->dr[7] & 0x400) == 0x400); /* bit 10 is reserved (MB1). */
3237 }
3238#endif
3239
3240 int rc = VERR_INTERNAL_ERROR_5;
3241 PVM pVM = pVCpu->CTX_SUFF(pVM);
3242 bool fInterceptDB = false;
3243 bool fInterceptMovDRx = false;
3244 if (DBGFIsStepping(pVCpu))
3245 {
3246 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
3247 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
3248 {
3249 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
3250 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3251 AssertRCReturn(rc, rc);
3252 Assert(fInterceptDB == false);
3253 }
3254 else
3255 {
3256 fInterceptDB = true;
3257 pMixedCtx->eflags.u32 |= X86_EFL_TF;
3258 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RFLAGS;
3259 }
3260 }
3261
3262 if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3263 {
3264 if (!CPUMIsHyperDebugStateActive(pVCpu))
3265 {
3266 rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
3267 AssertRC(rc);
3268 }
3269 Assert(CPUMIsHyperDebugStateActive(pVCpu));
3270 fInterceptMovDRx = true;
3271 }
3272 else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3273 {
3274 if (!CPUMIsGuestDebugStateActive(pVCpu))
3275 {
3276 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
3277 AssertRC(rc);
3278 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
3279 }
3280 Assert(CPUMIsGuestDebugStateActive(pVCpu));
3281 Assert(fInterceptMovDRx == false);
3282 }
3283 else if (!CPUMIsGuestDebugStateActive(pVCpu))
3284 {
3285         /* The guest debug state isn't loaded on the CPU yet; intercept MOV DRx accesses so the first access can load it lazily. */
3286 fInterceptMovDRx = true;
3287 }
3288
3289 /* Update the exception bitmap regarding intercepting #DB generated by the guest. */
3290 if (fInterceptDB)
3291 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
3292 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3293 {
3294#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3295 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
3296#endif
3297 }
3298
3299 /* Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions. */
3300 if (fInterceptMovDRx)
3301 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3302 else
3303 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3304
3305 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3306 AssertRCReturn(rc, rc);
3307 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3308 AssertRCReturn(rc, rc);
3309
3310     /* The guest's view of its DR7 is unblemished. Use a 32-bit write since the upper 32 bits are MBZ, as asserted above. */
3311 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
3312 AssertRCReturn(rc, rc);
3313
3314 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
3315 return rc;
3316}
3317
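/*
 * Editor's illustrative sketch (not part of the original file; the name is hypothetical): the DR7
 * reserved-bit rules asserted in hmR0VmxLoadGuestDebugRegs() above, expressed as a sanitizer.
 */
static uint64_t hmR0VmxSketchSanitizeDr7(uint64_t uDr7)
{
    uDr7 &= UINT64_C(0x00000000ffffffff);   /* Upper 32 bits are reserved (MBZ). */
    uDr7 &= ~UINT64_C(0xd800);              /* Bits 15, 14, 12 and 11 are reserved (MBZ). */
    uDr7 |= UINT64_C(0x400);                /* Bit 10 is reserved (MB1). */
    return uDr7;
}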
3318
3319#ifdef VBOX_STRICT
3320/**
3321 * Strict function to validate segment registers.
3322 *
3323 * @remarks Requires CR0.
3324 */
3325static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3326{
3327 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3328 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
3329 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
3330
3331     /* NOTE: We check for an attribute value of 0 and not just the unusable bit because hmR0VmxWriteSegmentReg()
3332      * only sets the unusable bit in the VMCS and doesn't change the guest-context value. */
3333 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
3334 && ( !CPUMIsGuestInRealModeEx(pCtx)
3335 && !CPUMIsGuestInV86ModeEx(pCtx)))
3336 {
3337 /* Protected mode checks */
3338 /* CS */
3339 Assert(pCtx->cs.Attr.n.u1Present);
3340 Assert(!(pCtx->cs.Attr.u & 0xf00));
3341 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
3342 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
3343 || !(pCtx->cs.Attr.n.u1Granularity));
3344 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
3345 || (pCtx->cs.Attr.n.u1Granularity));
3346 /* CS cannot be loaded with NULL in protected mode. */
3347 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & HMVMX_SEL_UNUSABLE)); /** @todo is this really true even for 64-bit CS?!? */
3348 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
3349 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
3350 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
3351 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
3352 else
3353             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
3354 /* SS */
3355 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3356 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
3357 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
3358 if ( !(pCtx->cr0 & X86_CR0_PE)
3359 || pCtx->cs.Attr.n.u4Type == 3)
3360 {
3361 Assert(!pCtx->ss.Attr.n.u2Dpl);
3362 }
3363 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & HMVMX_SEL_UNUSABLE))
3364 {
3365 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3366 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
3367 Assert(pCtx->ss.Attr.n.u1Present);
3368 Assert(!(pCtx->ss.Attr.u & 0xf00));
3369 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
3370 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
3371 || !(pCtx->ss.Attr.n.u1Granularity));
3372 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
3373 || (pCtx->ss.Attr.n.u1Granularity));
3374 }
3375 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
3376 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & HMVMX_SEL_UNUSABLE))
3377 {
3378 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3379 Assert(pCtx->ds.Attr.n.u1Present);
3380 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
3381 Assert(!(pCtx->ds.Attr.u & 0xf00));
3382 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
3383 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
3384 || !(pCtx->ds.Attr.n.u1Granularity));
3385 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
3386 || (pCtx->ds.Attr.n.u1Granularity));
3387 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3388 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
3389 }
3390 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & HMVMX_SEL_UNUSABLE))
3391 {
3392 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3393 Assert(pCtx->es.Attr.n.u1Present);
3394 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
3395 Assert(!(pCtx->es.Attr.u & 0xf00));
3396 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
3397 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
3398 || !(pCtx->es.Attr.n.u1Granularity));
3399 Assert( !(pCtx->es.u32Limit & 0xfff00000)
3400 || (pCtx->es.Attr.n.u1Granularity));
3401 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3402 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
3403 }
3404 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & HMVMX_SEL_UNUSABLE))
3405 {
3406 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3407 Assert(pCtx->fs.Attr.n.u1Present);
3408 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
3409 Assert(!(pCtx->fs.Attr.u & 0xf00));
3410 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
3411 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
3412 || !(pCtx->fs.Attr.n.u1Granularity));
3413 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
3414 || (pCtx->fs.Attr.n.u1Granularity));
3415 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3416 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3417 }
3418 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & HMVMX_SEL_UNUSABLE))
3419 {
3420 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3421 Assert(pCtx->gs.Attr.n.u1Present);
3422 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
3423 Assert(!(pCtx->gs.Attr.u & 0xf00));
3424 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
3425 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
3426 || !(pCtx->gs.Attr.n.u1Granularity));
3427 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
3428 || (pCtx->gs.Attr.n.u1Granularity));
3429 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3430 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3431 }
3432 /* 64-bit capable CPUs. */
3433# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3434 Assert(!(pCtx->cs.u64Base >> 32));
3435 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
3436 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
3437 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
3438# endif
3439 }
3440 else if ( CPUMIsGuestInV86ModeEx(pCtx)
3441 || ( CPUMIsGuestInRealModeEx(pCtx)
3442 && !pVM->hm.s.vmx.fUnrestrictedGuest))
3443 {
3444 /* Real and v86 mode checks. */
3445         /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS; here we validate what we actually feed to VT-x. */
3446 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
3447 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3448 {
3449 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
3450 }
3451 else
3452 {
3453 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
3454 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
3455 }
3456
3457 /* CS */
3458         AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 sel %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
3459 Assert(pCtx->cs.u32Limit == 0xffff);
3460 Assert(u32CSAttr == 0xf3);
3461 /* SS */
3462 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
3463 Assert(pCtx->ss.u32Limit == 0xffff);
3464 Assert(u32SSAttr == 0xf3);
3465 /* DS */
3466 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
3467 Assert(pCtx->ds.u32Limit == 0xffff);
3468 Assert(u32DSAttr == 0xf3);
3469 /* ES */
3470 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
3471 Assert(pCtx->es.u32Limit == 0xffff);
3472 Assert(u32ESAttr == 0xf3);
3473 /* FS */
3474 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
3475 Assert(pCtx->fs.u32Limit == 0xffff);
3476 Assert(u32FSAttr == 0xf3);
3477 /* GS */
3478 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
3479 Assert(pCtx->gs.u32Limit == 0xffff);
3480 Assert(u32GSAttr == 0xf3);
3481 /* 64-bit capable CPUs. */
3482# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3483 Assert(!(pCtx->cs.u64Base >> 32));
3484 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
3485 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
3486 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
3487# endif
3488 }
3489}
3490#endif /* VBOX_STRICT */
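/*
 * Editor's illustrative sketch (not part of the original file; the name is hypothetical): the
 * real-mode/v86 segment invariants checked in hmR0VmxValidateSegmentRegs() above. In real and
 * virtual-8086 mode the base is always selector * 16, the limit is 64 KB, and the real-on-v86
 * hack feeds VT-x the attribute value 0xf3 (present, DPL=3, accessed read/write data).
 */
static bool hmR0VmxSketchIsValidRealModeSeg(uint16_t uSel, uint64_t u64Base, uint32_t u32Limit, uint32_t u32Attr)
{
    return u64Base  == (uint64_t)uSel << 4
        && u32Limit == UINT32_C(0xffff)
        && u32Attr  == UINT32_C(0xf3);
}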
3491
3492
3493/**
3494 * Writes a guest segment register into the guest-state area in the VMCS.
3495 *
3496 * @returns VBox status code.
3497 * @param pVCpu Pointer to the VMCPU.
3498 * @param idxSel Index of the selector in the VMCS.
3499 * @param idxLimit Index of the segment limit in the VMCS.
3500 * @param idxBase Index of the segment base in the VMCS.
3501 * @param idxAccess Index of the access rights of the segment in the VMCS.
3502 * @param pSelReg Pointer to the segment selector.
3503 * @param pCtx Pointer to the guest-CPU context.
3504 *
3505 * @remarks No-long-jump zone!!!
3506 */
3507static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
3508 uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
3509{
3510 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
3511 AssertRCReturn(rc, rc);
3512 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
3513 AssertRCReturn(rc, rc);
3514 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
3515 AssertRCReturn(rc, rc);
3516
3517 uint32_t u32Access = pSelReg->Attr.u;
3518 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3519 {
3520 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
3521 u32Access = 0xf3;
3522 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3523 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3524 }
3525 else
3526 {
3527 /*
3528 * The way to differentiate between whether this is really a null selector or was just a selector loaded with 0 in
3529 * real-mode is using the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in
3530          * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that null selectors
3531          * loaded in protected-mode have their attributes set to 0.
3532 */
3533 if (!u32Access)
3534 u32Access = HMVMX_SEL_UNUSABLE;
3535 }
3536
3537 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
3538 AssertMsg((u32Access & HMVMX_SEL_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
3539               ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg->Sel, pSelReg->Attr.u));
3540
3541 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
3542 AssertRCReturn(rc, rc);
3543 return rc;
3544}
3545
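/*
 * Editor's illustrative sketch (not part of the original file; the name is hypothetical): the
 * access-rights value that hmR0VmxWriteSegmentReg() above ends up writing to the VMCS.
 */
static uint32_t hmR0VmxSketchSegAttrForVmcs(uint32_t uGuestAttr, bool fRealOnV86Active)
{
    if (fRealOnV86Active)
        return 0xf3;                /* The real-on-v86 hack overrides the attributes. */
    if (!uGuestAttr)
        return HMVMX_SEL_UNUSABLE;  /* Null selector loaded in protected mode -> VT-x "unusable" bit (bit 16). */
    return uGuestAttr;              /* Otherwise the guest's attributes are passed through unchanged. */
}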
3546
3547/**
3548 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
3549 * into the guest-state area in the VMCS.
3550 *
3551 * @returns VBox status code.
3552 * @param   pVCpu       Pointer to the VMCPU.
3554 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3555 * out-of-sync. Make sure to update the required fields
3556 * before using them.
3557 *
3558 * @remarks Requires CR0 (strict builds validation).
3559 * @remarks No-long-jump zone!!!
3560 */
3561static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3562{
3563 int rc = VERR_INTERNAL_ERROR_5;
3564 PVM pVM = pVCpu->CTX_SUFF(pVM);
3565
3566 /*
3567 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
3568 */
3569 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
3570 {
3571 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
3572 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3573 {
3574 pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pMixedCtx->cs.Attr.u;
3575 pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pMixedCtx->ss.Attr.u;
3576 pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pMixedCtx->ds.Attr.u;
3577 pVCpu->hm.s.vmx.RealMode.uAttrES.u = pMixedCtx->es.Attr.u;
3578 pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pMixedCtx->fs.Attr.u;
3579 pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pMixedCtx->gs.Attr.u;
3580 }
3581
3582#ifdef VBOX_WITH_REM
3583 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
3584 {
3585 Assert(pVM->hm.s.vmx.pRealModeTSS);
3586 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
3587 if ( pVCpu->hm.s.vmx.fWasInRealMode
3588 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
3589 {
3590 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
3591 in real-mode (e.g. OpenBSD 4.0) */
3592 REMFlushTBs(pVM);
3593 Log4(("Load: Switch to protected mode detected!\n"));
3594 pVCpu->hm.s.vmx.fWasInRealMode = false;
3595 }
3596 }
3597#endif
3598 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
3599 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
3600 AssertRCReturn(rc, rc);
3601 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
3602 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
3603 AssertRCReturn(rc, rc);
3604 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
3605 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
3606 AssertRCReturn(rc, rc);
3607 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
3608 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
3609 AssertRCReturn(rc, rc);
3610 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
3611 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
3612 AssertRCReturn(rc, rc);
3613 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
3614 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
3615 AssertRCReturn(rc, rc);
3616
3617 Log4(("Load: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
3618 pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
3619#ifdef VBOX_STRICT
3620 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
3621#endif
3622 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
3623 }
3624
3625 /*
3626 * Guest TR.
3627 */
3628 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
3629 {
3630 /*
3631 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
3632 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
3633 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
3634 */
3635 uint16_t u16Sel = 0;
3636 uint32_t u32Limit = 0;
3637 uint64_t u64Base = 0;
3638 uint32_t u32AccessRights = 0;
3639
3640 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3641 {
3642 u16Sel = pMixedCtx->tr.Sel;
3643 u32Limit = pMixedCtx->tr.u32Limit;
3644 u64Base = pMixedCtx->tr.u64Base;
3645 u32AccessRights = pMixedCtx->tr.Attr.u;
3646 }
3647 else
3648 {
3649 Assert(pVM->hm.s.vmx.pRealModeTSS);
3650 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
3651
3652 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
3653 RTGCPHYS GCPhys;
3654 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
3655 AssertRCReturn(rc, rc);
3656
3657 X86DESCATTR DescAttr;
3658 DescAttr.u = 0;
3659 DescAttr.n.u1Present = 1;
3660 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
3661
3662 u16Sel = 0;
3663 u32Limit = HM_VTX_TSS_SIZE;
3664 u64Base = GCPhys; /* in real-mode phys = virt. */
3665 u32AccessRights = DescAttr.u;
3666 }
3667
3668 /* Validate. */
3669 Assert(!(u16Sel & RT_BIT(2)));
3670 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
3671 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
3672 AssertMsg(!(u32AccessRights & HMVMX_SEL_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
3673 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
3674 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
3675 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
3676 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
3677 Assert( (u32Limit & 0xfff) == 0xfff
3678 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
3679 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
3680 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
3681
3682 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
3683 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
3684 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
3685 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
3686
3687 Log4(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
3688 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
3689 }
3690
3691 /*
3692 * Guest GDTR.
3693 */
3694 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
3695 {
3696 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
3697 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
3698
3699 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3700 Log4(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
3701 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
3702 }
3703
3704 /*
3705 * Guest LDTR.
3706 */
3707 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
3708 {
3709 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
3710 uint32_t u32Access = 0;
3711 if (!pMixedCtx->ldtr.Attr.u)
3712 u32Access = HMVMX_SEL_UNUSABLE;
3713 else
3714 u32Access = pMixedCtx->ldtr.Attr.u;
3715
3716 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
3717 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
3718 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
3719 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
3720
3721 /* Validate. */
3722 if (!(u32Access & HMVMX_SEL_UNUSABLE))
3723 {
3724 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
3725 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
3726 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
3727 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
3728 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
3729 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
3730 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
3731 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
3732 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
3733 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
3734 }
3735
3736 Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
3737 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
3738 }
3739
3740 /*
3741 * Guest IDTR.
3742 */
3743 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
3744 {
3745 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
3746 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
3747
3748 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3749 Log4(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
3750 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
3751 }
3752
3753 return VINF_SUCCESS;
3754}
3755
3756
3757/**
3758 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
3759 * areas. These MSRs will automatically be loaded to the host CPU on every
3760 * successful VM entry and stored from the host CPU on every successful VM exit.
3761 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
3762 *
3763 * @returns VBox status code.
3764 * @param pVCpu Pointer to the VMCPU.
3765 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3766 * out-of-sync. Make sure to update the required fields
3767 * before using them.
3768 *
3769 * @remarks No-long-jump zone!!!
3770 */
3771static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3772{
3773 AssertPtr(pVCpu);
3774 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
3775
3776 /*
3777 * MSRs covered by Auto-load/store: EFER, LSTAR, STAR, SF_MASK, TSC_AUX (RDTSCP).
3778 */
3779 int rc = VINF_SUCCESS;
3780 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
3781 {
3782#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
3783 PVM pVM = pVCpu->CTX_SUFF(pVM);
3784 PVMXMSR pGuestMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
3785 uint32_t cGuestMsrs = 0;
3786
3787 /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
3788 /** @todo r=ramshankar: Optimize this further to do lazy restoration and only
3789 * when the guest really is in 64-bit mode. */
3790 bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
3791 if (fSupportsLongMode)
3792 {
3793 pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
3794 pGuestMsr->u32Reserved = 0;
3795 pGuestMsr->u64Value = pMixedCtx->msrLSTAR; /* 64 bits mode syscall rip */
3796 pGuestMsr++; cGuestMsrs++;
3797 pGuestMsr->u32IndexMSR = MSR_K6_STAR;
3798 pGuestMsr->u32Reserved = 0;
3799 pGuestMsr->u64Value = pMixedCtx->msrSTAR; /* legacy syscall eip, cs & ss */
3800 pGuestMsr++; cGuestMsrs++;
3801 pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
3802 pGuestMsr->u32Reserved = 0;
3803 pGuestMsr->u64Value = pMixedCtx->msrSFMASK; /* syscall flag mask */
3804 pGuestMsr++; cGuestMsrs++;
3805 pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
3806 pGuestMsr->u32Reserved = 0;
3807 pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */
3808 pGuestMsr++; cGuestMsrs++;
3809 }
3810
3811 /*
3812 * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to
3813 * load the guest's copy if the guest can execute RDTSCP without causing VM-exits.
3814 */
3815 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)
3816 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP))
3817 {
3818 pGuestMsr->u32IndexMSR = MSR_K8_TSC_AUX;
3819 pGuestMsr->u32Reserved = 0;
3820 rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);
3821 AssertRCReturn(rc, rc);
3822 pGuestMsr++; cGuestMsrs++;
3823 }
3824
3825 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
3826 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))
3827 {
3828 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
3829 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3830 }
3831
3832 /* Update the VCPU's copy of the guest MSR count. */
3833 pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
3834 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3835 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3836#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
3837
3838 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
3839 }
3840
3841 /*
3842 * Guest Sysenter MSRs.
3843 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
3844 * VM-exits on WRMSRs for these MSRs.
3845 */
3846 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
3847 {
3848 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
3849 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
3850 }
3851 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
3852 {
3853 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
3854 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
3855 }
3856 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
3857 {
3858 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
3859 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
3860 }
3861
3862 return rc;
3863}
3864
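/*
 * Editor's illustrative sketch (not part of the original file; names are hypothetical): the
 * layout of one entry in the VM-entry MSR-load / VM-exit MSR-store area filled in by
 * hmR0VmxLoadGuestMsrs() above. Each entry is 16 bytes: MSR index, 32 reserved bits (MBZ) and
 * the 64-bit value; the entry count is written to both VMCS count fields.
 */
typedef struct SKETCHVMXAUTOMSR
{
    uint32_t u32IndexMSR;
    uint32_t u32Reserved;
    uint64_t u64Value;
} SKETCHVMXAUTOMSR;

static uint32_t hmR0VmxSketchAddAutoMsr(SKETCHVMXAUTOMSR *paMsrs, uint32_t cMsrs, uint32_t idMsr, uint64_t uValue)
{
    paMsrs[cMsrs].u32IndexMSR = idMsr;
    paMsrs[cMsrs].u32Reserved = 0;
    paMsrs[cMsrs].u64Value    = uValue;
    return cMsrs + 1;   /* New count for VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT / _EXIT_MSR_STORE_COUNT. */
}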
3865
3866/**
3867 * Loads the guest activity state into the guest-state area in the VMCS.
3868 *
3869 * @returns VBox status code.
3870 * @param pVCpu Pointer to the VMCPU.
3871 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3872 * out-of-sync. Make sure to update the required fields
3873 * before using them.
3874 *
3875 * @remarks No-long-jump zone!!!
3876 */
3877static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
3878{
3879 /** @todo See if we can make use of other states, e.g.
3880 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
3881 int rc = VINF_SUCCESS;
3882 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
3883 {
3884 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
3885 AssertRCReturn(rc, rc);
3886 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
3887 }
3888 return rc;
3889}
3890
3891
3892/**
3893 * Sets up the appropriate function to run guest code.
3894 *
3895 * @returns VBox status code.
3896 * @param pVCpu Pointer to the VMCPU.
3897 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3898 * out-of-sync. Make sure to update the required fields
3899 * before using them.
3900 *
3901 * @remarks No-long-jump zone!!!
3902 */
3903static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3904{
3905 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3906 {
3907#ifndef VBOX_ENABLE_64_BITS_GUESTS
3908 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3909#endif
3910 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
3911#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3912 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
3913 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
3914#else
3915 /* 64-bit host or hybrid host. */
3916 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
3917#endif
3918 }
3919 else
3920 {
3921 /* Guest is not in long mode, use the 32-bit handler. */
3922 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
3923 }
3924 Assert(pVCpu->hm.s.vmx.pfnStartVM);
3925 return VINF_SUCCESS;
3926}
3927
3928
3929/**
3930 * Wrapper for running the guest code in VT-x.
3931 *
3932 * @returns VBox strict status code.
3933 * @param pVM Pointer to the VM.
3934 * @param pVCpu Pointer to the VMCPU.
3935 * @param pCtx Pointer to the guest-CPU context.
3936 *
3937 * @remarks No-long-jump zone!!!
3938 */
3939DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3940{
3941 /*
3942 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
3943 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
3944      * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
3945      * Refer to the MSDN docs, "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage", for details.
3946#ifdef VBOX_WITH_KERNEL_USING_XMM
3947 return HMR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
3948#else
3949 return pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
3950#endif
3951}
3952
3953
3954/**
3955 * Reports world-switch error and dumps some useful debug info.
3956 *
3957 * @param pVM Pointer to the VM.
3958 * @param pVCpu Pointer to the VMCPU.
3959 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
3960 * @param pCtx Pointer to the guest-CPU context.
3961 * @param pVmxTransient Pointer to the VMX transient structure (only
3962 * exitReason updated).
3963 */
3964static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
3965{
3966 Assert(pVM);
3967 Assert(pVCpu);
3968 Assert(pCtx);
3969 Assert(pVmxTransient);
3970 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3971
3972 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
3973 switch (rcVMRun)
3974 {
3975 case VERR_VMX_INVALID_VMXON_PTR:
3976 AssertFailed();
3977 break;
3978 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
3979 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
3980 {
3981 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.lasterror.u32ExitReason);
3982 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
3983 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
3984 AssertRC(rc);
3985
3986#ifdef VBOX_STRICT
3987 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.lasterror.u32ExitReason,
3988 pVmxTransient->uExitReason));
3989 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
3990 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.lasterror.u32InstrError));
3991 if (pVCpu->hm.s.vmx.lasterror.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
3992 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.lasterror.u32InstrError]));
3993 else
3994 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
3995
3996 /* VMX control bits. */
3997 uint32_t u32Val;
3998 uint64_t u64Val;
3999 HMVMXHCUINTREG uHCReg;
4000 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
4001 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
4002 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
4003 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
4004 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
4005 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
4006 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
4007 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
4008 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
4009 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
4010 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
4011 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
4012 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
4013 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
4014 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
4015 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
4016 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
4017 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
4018 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
4019 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
4020 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
4021 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
4022 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4023 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
4024 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4025 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
4026 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
4027 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
4028 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
4029 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
4030 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
4031 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
4032 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
4033 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
4034 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
4035             Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
4036 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
4037 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
4038 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
4039 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4040 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
4041 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
4042
4043 /* Guest bits. */
4044 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
4045 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
4046 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
4047 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
4048 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
4049 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
4050 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
4051 Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
4052
4053 /* Host bits. */
4054 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
4055 Log4(("Host CR0 %#RHr\n", uHCReg));
4056 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
4057 Log4(("Host CR3 %#RHr\n", uHCReg));
4058 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
4059 Log4(("Host CR4 %#RHr\n", uHCReg));
4060
4061 RTGDTR HostGdtr;
4062 PCX86DESCHC pDesc;
4063 ASMGetGDTR(&HostGdtr);
4064 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
4065 Log4(("Host CS %#08x\n", u32Val));
4066 if (u32Val < HostGdtr.cbGdt)
4067 {
4068 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4069 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
4070 }
4071
4072 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
4073 Log4(("Host DS %#08x\n", u32Val));
4074 if (u32Val < HostGdtr.cbGdt)
4075 {
4076 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4077 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
4078 }
4079
4080 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
4081 Log4(("Host ES %#08x\n", u32Val));
4082 if (u32Val < HostGdtr.cbGdt)
4083 {
4084 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4085 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
4086 }
4087
4088 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
4089 Log4(("Host FS %#08x\n", u32Val));
4090 if (u32Val < HostGdtr.cbGdt)
4091 {
4092 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4093 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
4094 }
4095
4096 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
4097 Log4(("Host GS %#08x\n", u32Val));
4098 if (u32Val < HostGdtr.cbGdt)
4099 {
4100 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4101 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
4102 }
4103
4104 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
4105 Log4(("Host SS %#08x\n", u32Val));
4106 if (u32Val < HostGdtr.cbGdt)
4107 {
4108 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4109 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
4110 }
4111
4112 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
4113 Log4(("Host TR %#08x\n", u32Val));
4114 if (u32Val < HostGdtr.cbGdt)
4115 {
4116 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4117 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
4118 }
4119
4120 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
4121 Log4(("Host TR Base %#RHv\n", uHCReg));
4122 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
4123 Log4(("Host GDTR Base %#RHv\n", uHCReg));
4124 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
4125 Log4(("Host IDTR Base %#RHv\n", uHCReg));
4126 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
4127 Log4(("Host SYSENTER CS %#08x\n", u32Val));
4128 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
4129 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
4130 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
4131 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
4132 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
4133 Log4(("Host RSP %#RHv\n", uHCReg));
4134 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
4135 Log4(("Host RIP %#RHv\n", uHCReg));
4136# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4137 if (HMVMX_IS_64BIT_HOST_MODE())
4138 {
4139 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
4140 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
4141 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
4142 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
4143 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
4144 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
4145 }
4146# endif
4147#endif /* VBOX_STRICT */
4148 break;
4149 }
4150
4151 default:
4152 /* Impossible */
4153 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
4154 break;
4155 }
4156 NOREF(pVM);
4157}
4158
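/*
 * Editor's illustrative sketch (not part of the original file; the name is hypothetical): the
 * repeated host-selector dump pattern in hmR0VmxReportWorldSwitchError() above factored into a
 * helper, using the same Log4/HMR0DumpDescriptor calls.
 */
static void hmR0VmxSketchDumpHostSel(const RTGDTR *pGdtr, uint32_t uSel, const char *pszLabel)
{
    Log4(("Host %s %#08x\n", pszLabel, uSel));
    if (uSel < pGdtr->cbGdt)
        HMR0DumpDescriptor((PCX86DESCHC)(pGdtr->pGdt + (uSel & X86_SEL_MASK)), uSel, pszLabel);
}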
4159
4160#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4161#ifndef VMX_USE_CACHED_VMCS_ACCESSES
4162# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
4163#endif
4164#ifdef VBOX_STRICT
4165static bool hmR0VmxIsValidWriteField(uint32_t idxField)
4166{
4167 switch (idxField)
4168 {
4169 case VMX_VMCS_GUEST_RIP:
4170 case VMX_VMCS_GUEST_RSP:
4171 case VMX_VMCS_GUEST_SYSENTER_EIP:
4172 case VMX_VMCS_GUEST_SYSENTER_ESP:
4173 case VMX_VMCS_GUEST_GDTR_BASE:
4174 case VMX_VMCS_GUEST_IDTR_BASE:
4175 case VMX_VMCS_GUEST_CS_BASE:
4176 case VMX_VMCS_GUEST_DS_BASE:
4177 case VMX_VMCS_GUEST_ES_BASE:
4178 case VMX_VMCS_GUEST_FS_BASE:
4179 case VMX_VMCS_GUEST_GS_BASE:
4180 case VMX_VMCS_GUEST_SS_BASE:
4181 case VMX_VMCS_GUEST_LDTR_BASE:
4182 case VMX_VMCS_GUEST_TR_BASE:
4183 case VMX_VMCS_GUEST_CR3:
4184 return true;
4185 }
4186 return false;
4187}
4188
4189static bool hmR0VmxIsValidReadField(uint32_t idxField)
4190{
4191 switch (idxField)
4192 {
4193 /* Read-only fields. */
4194 case VMX_VMCS_RO_EXIT_QUALIFICATION:
4195 return true;
4196 }
4197 /* Remaining readable fields should also be writable. */
4198 return hmR0VmxIsValidWriteField(idxField);
4199}
4200#endif /* VBOX_STRICT */
4201
4202
4203/**
4204 * Executes the specified handler in 64-bit mode.
4205 *
4206 * @returns VBox status code.
4207 * @param pVM Pointer to the VM.
4208 * @param pVCpu Pointer to the VMCPU.
4209 * @param pCtx Pointer to the guest CPU context.
4210 * @param enmOp The operation to perform.
4211 * @param cbParam Number of parameters.
4212 * @param paParam Array of 32-bit parameters.
4213 */
4214VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
4215 uint32_t *paParam)
4216{
4217 int rc, rc2;
4218 PHMGLOBLCPUINFO pCpu;
4219 RTHCPHYS HCPhysCpuPage;
4220 RTCCUINTREG uOldEFlags;
4221
4222 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
4223 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
4224 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
4225 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
4226
4227#ifdef VBOX_STRICT
4228 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
4229 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
4230
4231 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
4232 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
4233#endif
4234
4235 /* Disable interrupts. */
4236 uOldEFlags = ASMIntDisableFlags();
4237
4238#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
4239 RTCPUID idHostCpu = RTMpCpuId();
4240 CPUMR0SetLApic(pVM, idHostCpu);
4241#endif
4242
4243 pCpu = HMR0GetCurrentCpu();
4244 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4245
4246 /* Clear the VMCS: mark it inactive, clear implementation-specific data and write the VMCS data back to memory. */
4247 VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4248
4249 /* Leave VMX Root Mode. */
4250 VMXDisable();
4251
4252 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4253
4254 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
4255 CPUMSetHyperEIP(pVCpu, enmOp);
4256 for (int i = (int)cbParam - 1; i >= 0; i--)
4257 CPUMPushHyper(pVCpu, paParam[i]);
4258
4259 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
4260
4261 /* Call the switcher. */
4262 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
4263 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
4264
4265 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
4266 /* Make sure the VMX instructions don't cause #UD faults. */
4267 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
4268
4269 /* Re-enter VMX Root Mode. */
4270 rc2 = VMXEnable(HCPhysCpuPage);
4271 if (RT_FAILURE(rc2))
4272 {
4273 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4274 ASMSetFlags(uOldEFlags);
4275 return rc2;
4276 }
4277
4278 rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4279 AssertRC(rc2);
4280 Assert(!(ASMGetFlags() & X86_EFL_IF));
4281 ASMSetFlags(uOldEFlags);
4282 return rc;
4283}
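/*
 * Illustration only (hedged sketch, not compiled): callers are expected to pack 64-bit
 * values as consecutive low/high 32-bit parameters before invoking the 64-bit handler,
 * mirroring the push order above. The helper name is made up and the operation code is
 * reused purely as a placeholder; VMXR0SwitcherStartVM64() below is the real in-tree
 * caller of VMXR0Execute64BitsHandler().
 */
#if 0
static int hmR0VmxExampleCall64BitHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t u64Arg)
{
    uint32_t aParam[2];
    aParam[0] = (uint32_t)u64Arg;            /* Param 1: low dword. */
    aParam[1] = (uint32_t)(u64Arg >> 32);    /* Param 1: high dword. */
    return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64 /* placeholder */, 2, &aParam[0]);
}
#endif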
4284
4285
4286/**
4287 * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guests) on 32-bit hosts
4288 * supporting 64-bit guests.
4289 *
4290 * @returns VBox status code.
4291 * @param fResume Whether to VMLAUNCH or VMRESUME.
4292 * @param pCtx Pointer to the guest-CPU context.
4293 * @param pCache Pointer to the VMCS cache.
4294 * @param pVM Pointer to the VM.
4295 * @param pVCpu Pointer to the VMCPU.
4296 */
4297DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
4298{
4299 uint32_t aParam[6];
4300 PHMGLOBLCPUINFO pCpu = NULL;
4301 RTHCPHYS HCPhysCpuPage = 0;
4302 int rc = VERR_INTERNAL_ERROR_5;
4303
4304 pCpu = HMR0GetCurrentCpu();
4305 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4306
4307#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4308 pCache->uPos = 1;
4309 pCache->interPD = PGMGetInterPaeCR3(pVM);
4310 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
4311#endif
4312
4313#ifdef VBOX_STRICT
4314 pCache->TestIn.HCPhysCpuPage = 0;
4315 pCache->TestIn.HCPhysVmcs = 0;
4316 pCache->TestIn.pCache = 0;
4317 pCache->TestOut.HCPhysVmcs = 0;
4318 pCache->TestOut.pCache = 0;
4319 pCache->TestOut.pCtx = 0;
4320 pCache->TestOut.eflags = 0;
4321#endif
4322
4323 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
4324 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
4325 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
4326 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
4327 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
4328 aParam[5] = 0;
4329
4330#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4331 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
4332 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
4333#endif
4334 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
4335
4336#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4337 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
4338 Assert(pCtx->dr[4] == 10);
4339 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
4340#endif
4341
4342#ifdef VBOX_STRICT
4343 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
4344 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4345 pVCpu->hm.s.vmx.HCPhysVmcs));
4346 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4347 pCache->TestOut.HCPhysVmcs));
4348 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
4349 pCache->TestOut.pCache));
4350 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
4351 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
4352 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
4353 pCache->TestOut.pCtx));
4354 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
4355#endif
4356 return rc;
4357}
4358
4359
4360/**
4361 * Initializes the VMCS read cache. The VMCS cache is used for 32-bit hosts
4362 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
4363 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
4364 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
4365 *
4366 * @returns VBox status code.
4367 * @param pVM Pointer to the VM.
4368 * @param pVCpu Pointer to the VMCPU.
4369 */
4370static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
4371{
4372#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
4373{ \
4374 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
4375 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
4376 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
4377 ++cReadFields; \
4378}
4379
4380 AssertPtr(pVM);
4381 AssertPtr(pVCpu);
4382 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4383 uint32_t cReadFields = 0;
4384
4385 /*
4386 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
4387 * and serve to indicate exceptions to the rules.
4388 */
4389
4390 /* Guest-natural selector base fields. */
4391#if 0
4392 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
4393 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
4394 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
4395#endif
4396 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
4397 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
4398 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
4399 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
4400 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
4401 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
4402 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
4403 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
4404 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
4405 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
4406 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
4407 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
4408#if 0
4409 /* Unused natural width guest-state fields. */
4410 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
4411 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
4412#endif
4413 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
4414 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
4415
4416 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
4417#if 0
4418 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
4419 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
4420 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
4421 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
4422 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
4423 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
4424 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
4425 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
4426 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
4427#endif
4428
4429 /* Natural width guest-state fields. */
4430 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
4431#if 0
4432 /* Currently unused field. */
4433 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
4434#endif
4435
4436 if (pVM->hm.s.fNestedPaging)
4437 {
4438 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
4439 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
4440 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
4441 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
4442 }
4443 else
4444 {
4445 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
4446 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
4447 }
4448
4449#undef VMXLOCAL_INIT_READ_CACHE_FIELD
4450 return VINF_SUCCESS;
4451}
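/*
 * Informal note on how the cache is consumed: the indices registered above are the
 * <field>_CACHE_IDX slots that VMXLOCAL_READ_SEG() and hmR0VmxReadSegmentReg() use for
 * the base fields when VMX_USE_CACHED_VMCS_ACCESSES is defined. The cached values are
 * captured around the world switch (currently by the assembly switcher itself;
 * VMXReadCachedVmcsStore() further down shows the equivalent C, kept under #if 0 until
 * it is wired up as a callback).
 */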
4452
4453
4454/**
4455 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
4456 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
4457 * Darwin, running 64-bit guests).
4458 *
4459 * @returns VBox status code.
4460 * @param pVCpu Pointer to the VMCPU.
4461 * @param idxField The VMCS field encoding.
4462 * @param u64Val The value to write (16, 32 or 64 bits).
4463 */
4464VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4465{
4466 int rc;
4467 switch (idxField)
4468 {
4469 /*
4470 * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
4471 */
4472 /* 64-bit Control fields. */
4473 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
4474 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
4475 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
4476 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
4477 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
4478 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
4479 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
4480 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
4481 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
4482 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
4483 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
4484 case VMX_VMCS64_CTRL_EPTP_FULL:
4485 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
4486 /* 64-bit Guest-state fields. */
4487 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
4488 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
4489 case VMX_VMCS64_GUEST_PAT_FULL:
4490 case VMX_VMCS64_GUEST_EFER_FULL:
4491 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
4492 case VMX_VMCS64_GUEST_PDPTE0_FULL:
4493 case VMX_VMCS64_GUEST_PDPTE1_FULL:
4494 case VMX_VMCS64_GUEST_PDPTE2_FULL:
4495 case VMX_VMCS64_GUEST_PDPTE3_FULL:
4496 /* 64-bit Host-state fields. */
4497 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
4498 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
4499 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
4500 {
4501 rc = VMXWriteVmcs32(idxField, u64Val);
4502 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
4503 break;
4504 }
4505
4506 /*
4507 * These fields do not have high and low parts. We queue up the VMWRITE in the VMCS write-cache (for 64-bit
4508 * values); the queued VMWRITEs are executed when we switch the host to 64-bit mode to run the 64-bit guest.
4509 */
4510 /* Natural-width Guest-state fields. */
4511 case VMX_VMCS_GUEST_CR3:
4512 case VMX_VMCS_GUEST_ES_BASE:
4513 case VMX_VMCS_GUEST_CS_BASE:
4514 case VMX_VMCS_GUEST_SS_BASE:
4515 case VMX_VMCS_GUEST_DS_BASE:
4516 case VMX_VMCS_GUEST_FS_BASE:
4517 case VMX_VMCS_GUEST_GS_BASE:
4518 case VMX_VMCS_GUEST_LDTR_BASE:
4519 case VMX_VMCS_GUEST_TR_BASE:
4520 case VMX_VMCS_GUEST_GDTR_BASE:
4521 case VMX_VMCS_GUEST_IDTR_BASE:
4522 case VMX_VMCS_GUEST_RSP:
4523 case VMX_VMCS_GUEST_RIP:
4524 case VMX_VMCS_GUEST_SYSENTER_ESP:
4525 case VMX_VMCS_GUEST_SYSENTER_EIP:
4526 {
4527 if (!(u64Val >> 32))
4528 {
4529 /* If this field is 64-bit, VT-x will zero out the top bits. */
4530 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
4531 }
4532 else
4533 {
4534 /* Assert that only the 32->64 switcher case should ever come here. */
4535 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
4536 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
4537 }
4538 break;
4539 }
4540
4541 default:
4542 {
4543 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
4544 rc = VERR_INVALID_PARAMETER;
4545 break;
4546 }
4547 }
4548 AssertRCReturn(rc, rc);
4549 return rc;
4550}
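/*
 * Worked example of the splitting rule above (the value is arbitrary): for a field with
 * FULL/HIGH parts the companion HIGH encoding is FULL + 1, so
 *
 *     VMXWriteVmcs64Ex(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, UINT64_C(0x123456789a));
 *
 * performs
 *
 *     VMXWriteVmcs32(VMX_VMCS64_CTRL_TSC_OFFSET_FULL,     UINT32_C(0x3456789a));
 *     VMXWriteVmcs32(VMX_VMCS64_CTRL_TSC_OFFSET_FULL + 1, UINT32_C(0x00000012));
 *
 * whereas a natural-width guest field with a non-zero upper dword is deferred to the
 * write cache via VMXWriteCachedVmcsEx() and flushed once the host is in 64-bit mode.
 */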
4551
4552
4553/**
4554 * Queues up a VMWRITE by using the VMCS write cache. This is only used on 32-bit
4555 * hosts (except Darwin) for 64-bit guests.
4556 *
4557 * @param pVCpu Pointer to the VMCPU.
4558 * @param idxField The VMCS field encoding.
4559 * @param u64Val The value to write (16, 32 or 64 bits).
4560 */
4561VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4562{
4563 AssertPtr(pVCpu);
4564 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4565
4566 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
4567 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
4568
4569 /* Make sure there are no duplicates. */
4570 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4571 {
4572 if (pCache->Write.aField[i] == idxField)
4573 {
4574 pCache->Write.aFieldVal[i] = u64Val;
4575 return VINF_SUCCESS;
4576 }
4577 }
4578
4579 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
4580 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
4581 pCache->Write.cValidEntries++;
4582 return VINF_SUCCESS;
4583}
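/*
 * Usage note (informal): queuing the same field twice simply overwrites the cached
 * value, e.g.
 *
 *     VMXWriteCachedVmcsEx(pVCpu, VMX_VMCS_GUEST_RIP, uOldRip);
 *     VMXWriteCachedVmcsEx(pVCpu, VMX_VMCS_GUEST_RIP, uNewRip);
 *
 * leaves a single entry holding uNewRip; only distinct fields consume extra cache slots.
 */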
4584
4585/* Enable later when the assembly code uses these as callbacks. */
4586#if 0
4587/*
4588 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
4589 *
4590 * @param pVCpu Pointer to the VMCPU.
4591 * @param pCache Pointer to the VMCS cache.
4592 *
4593 * @remarks No-long-jump zone!!!
4594 */
4595VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
4596{
4597 AssertPtr(pCache);
4598 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4599 {
4600 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
4601 AssertRC(rc);
4602 }
4603 pCache->Write.cValidEntries = 0;
4604}
4605
4606
4607/**
4608 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
4609 *
4610 * @param pVCpu Pointer to the VMCPU.
4611 * @param pCache Pointer to the VMCS cache.
4612 *
4613 * @remarks No-long-jump zone!!!
4614 */
4615VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
4616{
4617 AssertPtr(pCache);
4618 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
4619 {
4620 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
4621 AssertRC(rc);
4622 }
4623}
4624#endif
4625#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
4626
4627
4628/**
4629 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
4630 * not possible, causes VM-exits on RDTSC(P). Also sets up the VMX preemption
4631 * timer.
4632 *
4633 * @returns VBox status code.
4634 * @param pVCpu Pointer to the VMCPU.
4635 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4636 * out-of-sync. Make sure to update the required fields
4637 * before using them.
4638 * @remarks No-long-jump zone!!!
4639 */
4640static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4641{
4642 int rc = VERR_INTERNAL_ERROR_5;
4643 bool fOffsettedTsc = false;
4644 PVM pVM = pVCpu->CTX_SUFF(pVM);
4645 if (pVM->hm.s.vmx.fUsePreemptTimer)
4646 {
4647 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
4648
4649 /* Make sure the returned values have sane upper and lower boundaries. */
4650 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
4651 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
4652 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
4653 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
4654
4655 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
4656 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
4657 }
4658 else
4659 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
4660
4661 if (fOffsettedTsc)
4662 {
4663 uint64_t u64CurTSC = ASMReadTSC();
4664 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
4665 {
4666 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
4667 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
4668
4669 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4670 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4671 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4672 }
4673 else
4674 {
4675 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
4676 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4677 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4678 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
4679 }
4680 }
4681 else
4682 {
4683 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
4684 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4685 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4686 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4687 }
4688}
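/*
 * Worked example for the preemption-timer value above (hypothetical numbers): assuming
 * a 2 GHz TSC and a preemption-timer rate shift of 5 (the timer ticks once every 32 TSC
 * cycles), the deadline is clamped to [2e9/2048, 2e9/64] = [976562, 31250000] TSC ticks
 * and then shifted right by 5, yielding a VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE between
 * roughly 30517 and 976562 timer units, i.e. a forced VM-exit after at most ~1/64th of
 * a second of guest execution.
 */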
4689
4690
4691/**
4692 * Determines if an exception is a contributory exception. Contributory
4693 * exceptions are ones which can cause double-faults. Page-fault is
4694 * intentionally not included here as it's a conditional contributory exception.
4695 *
4696 * @returns true if the exception is contributory, false otherwise.
4697 * @param uVector The exception vector.
4698 */
4699DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
4700{
4701 switch (uVector)
4702 {
4703 case X86_XCPT_GP:
4704 case X86_XCPT_SS:
4705 case X86_XCPT_NP:
4706 case X86_XCPT_TS:
4707 case X86_XCPT_DE:
4708 return true;
4709 default:
4710 break;
4711 }
4712 return false;
4713}
4714
4715
4716/**
4717 * Sets an event as a pending event to be injected into the guest.
4718 *
4719 * @param pVCpu Pointer to the VMCPU.
4720 * @param u32IntrInfo The VM-entry interruption-information field.
4721 * @param cbInstr The VM-entry instruction length in bytes (for software
4722 * interrupts, exceptions and privileged software
4723 * exceptions).
4724 * @param u32ErrCode The VM-entry exception error code.
4725 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
4726 * page-fault.
4727 */
4728DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntrInfo, uint32_t cbInstr, uint32_t u32ErrCode,
4729 RTGCUINTPTR GCPtrFaultAddress)
4730{
4731 Assert(!pVCpu->hm.s.Event.fPending);
4732 pVCpu->hm.s.Event.fPending = true;
4733 pVCpu->hm.s.Event.u64IntrInfo = u32IntrInfo;
4734 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
4735 pVCpu->hm.s.Event.cbInstr = cbInstr;
4736 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
4737}
4738
4739
4740/**
4741 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
4742 *
4743 * @param pVCpu Pointer to the VMCPU.
4744 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4745 * out-of-sync. Make sure to update the required fields
4746 * before using them.
4747 */
4748DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4749{
4750 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
4751 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
4752 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
4753 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4754}
4755
4756
4757/**
4758 * Handles a condition that occurred while delivering an event through the guest
4759 * IDT.
4760 *
4761 * @returns VBox status code (informational error codes included).
4762 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
4763 * @retval VINF_VMX_DOUBLE_FAULT if a #DF condition was detected and we ought to
4764 * continue execution of the guest, which will deliver the #DF.
4765 * @retval VINF_EM_RESET if we detected a triple-fault condition.
4766 *
4767 * @param pVCpu Pointer to the VMCPU.
4768 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4769 * out-of-sync. Make sure to update the required fields
4770 * before using them.
4771 * @param pVmxTransient Pointer to the VMX transient structure.
4772 *
4773 * @remarks No-long-jump zone!!!
4774 */
4775static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
4776{
4777 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
4778 AssertRC(rc);
4779 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
4780 {
4781 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
4782 AssertRCReturn(rc, rc);
4783
4784 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
4785 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
4786 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
4787
4788 typedef enum
4789 {
4790 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
4791 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
4792 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
4793 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
4794 } VMXREFLECTXCPT;
4795
4796 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
4797 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
4798 if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
4799 {
4800 enmReflect = VMXREFLECTXCPT_XCPT;
4801#ifdef VBOX_STRICT
4802 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
4803 && uExitVector == X86_XCPT_PF)
4804 {
4805 Log4(("IDT: Contributory #PF uCR2=%#RX64\n", pMixedCtx->cr2));
4806 }
4807#endif
4808 if ( uExitVector == X86_XCPT_PF
4809 && uIdtVector == X86_XCPT_PF)
4810 {
4811 pVmxTransient->fVectoringPF = true;
4812 Log4(("IDT: Vectoring #PF uCR2=%#RX64\n", pMixedCtx->cr2));
4813 }
4814 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
4815 && hmR0VmxIsContributoryXcpt(uExitVector)
4816 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
4817 || uIdtVector == X86_XCPT_PF))
4818 {
4819 enmReflect = VMXREFLECTXCPT_DF;
4820 }
4821 else if (uIdtVector == X86_XCPT_DF)
4822 enmReflect = VMXREFLECTXCPT_TF;
4823 }
4824 else if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4825 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
4826 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
4827 {
4828 /*
4829 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and privileged software exceptions
4830 * (INT1/ICEBP) as they reoccur when the instruction is restarted.
4831 */
4832 enmReflect = VMXREFLECTXCPT_XCPT;
4833 }
4834
4835 switch (enmReflect)
4836 {
4837 case VMXREFLECTXCPT_XCPT:
4838 {
4839 uint32_t u32ErrCode = 0;
4840 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo))
4841 {
4842 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
4843 AssertRCReturn(rc, rc);
4844 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
4845 }
4846
4847 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
4848 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
4849 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
4850 rc = VINF_SUCCESS;
4851 Log4(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo,
4852 pVCpu->hm.s.Event.u32ErrCode));
4853 break;
4854 }
4855
4856 case VMXREFLECTXCPT_DF:
4857 {
4858 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
4859 rc = VINF_VMX_DOUBLE_FAULT;
4860 Log4(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
4861 uIdtVector, uExitVector));
4862 break;
4863 }
4864
4865 case VMXREFLECTXCPT_TF:
4866 {
4867 Log4(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
4868 rc = VINF_EM_RESET;
4869 break;
4870 }
4871
4872 default:
4873 Assert(rc == VINF_SUCCESS);
4874 break;
4875 }
4876 }
4877 Assert(rc == VINF_SUCCESS || rc == VINF_VMX_DOUBLE_FAULT || rc == VINF_EM_RESET);
4878 return rc;
4879}
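/*
 * Informal recap of the reflection rules implemented above (cf. Intel spec. 30.7.1.1):
 *
 *   Event being delivered (IDT-vectoring)       Exception causing the VM-exit    Outcome
 *   ------------------------------------------  -------------------------------  ---------------------------------
 *   #PF                                         #PF                              re-inject #PF, flag vectoring #PF
 *   contributory (#DE/#TS/#NP/#SS/#GP) or #PF   contributory (when intercepted)  queue a #DF
 *   #DF                                         any exception                    triple fault -> VINF_EM_RESET
 *   other hardware exception, ext. int. or NMI  any                              re-inject the original event
 *   software INT n, #BP/#OF, priv. sw. xcpt     any                              nothing (reoccurs on restart)
 */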
4880
4881
4882/**
4883 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
4884 *
4885 * @returns VBox status code.
4886 * @param pVCpu Pointer to the VMCPU.
4887 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4888 * out-of-sync. Make sure to update the required fields
4889 * before using them.
4890 *
4891 * @remarks No-long-jump zone!!!
4892 */
4893static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4894{
4895 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0))
4896 {
4897 uint32_t uVal = 0;
4898 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
4899 AssertRCReturn(rc, rc);
4900 uint32_t uShadow = 0;
4901 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
4902 AssertRCReturn(rc, rc);
4903
4904 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
4905 CPUMSetGuestCR0(pVCpu, uVal);
4906 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR0;
4907 }
4908 return VINF_SUCCESS;
4909}
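/*
 * Worked example of the mask merge above (mask and values are hypothetical): with a CR0
 * guest/host mask of 0x80000020 (PG and NE host-owned), a VMCS guest CR0 of 0x80000031
 * and a read shadow of 0x00000011, the guest-visible CR0 is reconstructed as
 *
 *     (0x00000011 & 0x80000020) | (0x80000031 & ~0x80000020) = 0x00000000 | 0x00000011
 *                                                            = 0x00000011
 *
 * i.e. host-owned bits come from the read shadow while the guest-owned bits come from
 * the real CR0 in the VMCS.
 */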
4910
4911
4912/**
4913 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
4914 *
4915 * @returns VBox status code.
4916 * @param pVCpu Pointer to the VMCPU.
4917 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4918 * out-of-sync. Make sure to update the required fields
4919 * before using them.
4920 *
4921 * @remarks No-long-jump zone!!!
4922 */
4923static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4924{
4925 int rc = VINF_SUCCESS;
4926 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4))
4927 {
4928 uint32_t uVal = 0;
4929 uint32_t uShadow = 0;
4930 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
4931 AssertRCReturn(rc, rc);
4932 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
4933 AssertRCReturn(rc, rc);
4934
4935 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
4936 CPUMSetGuestCR4(pVCpu, uVal);
4937 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR4;
4938 }
4939 return rc;
4940}
4941
4942
4943/**
4944 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
4945 *
4946 * @returns VBox status code.
4947 * @param pVCpu Pointer to the VMCPU.
4948 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4949 * out-of-sync. Make sure to update the required fields
4950 * before using them.
4951 *
4952 * @remarks No-long-jump zone!!!
4953 */
4954static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4955{
4956 int rc = VINF_SUCCESS;
4957 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
4958 {
4959 uint64_t u64Val = 0;
4960 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
4961 AssertRCReturn(rc, rc);
4962
4963 pMixedCtx->rip = u64Val;
4964 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
4965 }
4966 return rc;
4967}
4968
4969
4970/**
4971 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
4972 *
4973 * @returns VBox status code.
4974 * @param pVCpu Pointer to the VMCPU.
4975 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4976 * out-of-sync. Make sure to update the required fields
4977 * before using them.
4978 *
4979 * @remarks No-long-jump zone!!!
4980 */
4981static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4982{
4983 int rc = VINF_SUCCESS;
4984 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RSP))
4985 {
4986 uint64_t u64Val = 0;
4987 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
4988 AssertRCReturn(rc, rc);
4989
4990 pMixedCtx->rsp = u64Val;
4991 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RSP;
4992 }
4993 return rc;
4994}
4995
4996
4997/**
4998 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
4999 *
5000 * @returns VBox status code.
5001 * @param pVCpu Pointer to the VMCPU.
5002 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5003 * out-of-sync. Make sure to update the required fields
5004 * before using them.
5005 *
5006 * @remarks No-long-jump zone!!!
5007 */
5008static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5009{
5010 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS))
5011 {
5012 uint32_t uVal = 0;
5013 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
5014 AssertRCReturn(rc, rc);
5015
5016 pMixedCtx->eflags.u32 = uVal;
5017 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
5018 {
5019 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
5020 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
5021
5022 pMixedCtx->eflags.Bits.u1VM = 0;
5023 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
5024 }
5025
5026 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RFLAGS;
5027 }
5028 return VINF_SUCCESS;
5029}
5030
5031
5032/**
5033 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
5034 * guest-CPU context.
5035 */
5036DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5037{
5038 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5039 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
5040 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5041 return rc;
5042}
5043
5044
5045/**
5046 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
5047 * from the guest-state area in the VMCS.
5048 *
5049 * @param pVCpu Pointer to the VMCPU.
5050 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5051 * out-of-sync. Make sure to update the required fields
5052 * before using them.
5053 *
5054 * @remarks No-long-jump zone!!!
5055 */
5056static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5057{
5058 uint32_t uIntrState = 0;
5059 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
5060 AssertRC(rc);
5061
5062 if (!uIntrState)
5063 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5064 else
5065 {
5066 Assert( uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
5067 || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
5068 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5069 AssertRC(rc);
5070 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
5071 AssertRC(rc);
5072
5073 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
5074 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
5075 }
5076}
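/*
 * Informal note: a non-zero interruptibility-state means the guest sits in an interrupt
 * shadow, i.e. on the instruction boundary right after STI or MOV SS/POP SS. We mirror
 * that with VMCPU_FF_INHIBIT_INTERRUPTS and record the RIP at which the inhibition
 * applies (EMSetInhibitInterruptsPC); the inhibition is only honoured for as long as
 * RIP stays at that value.
 */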
5077
5078
5079/**
5080 * Saves the guest's activity state.
5081 *
5082 * @returns VBox status code.
5083 * @param pVCpu Pointer to the VMCPU.
5084 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5085 * out-of-sync. Make sure to update the required fields
5086 * before using them.
5087 *
5088 * @remarks No-long-jump zone!!!
5089 */
5090static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5091{
5092 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
5093 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE;
5094 return VINF_SUCCESS;
5095}
5096
5097
5098/**
5099 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
5100 * the current VMCS into the guest-CPU context.
5101 *
5102 * @returns VBox status code.
5103 * @param pVCpu Pointer to the VMCPU.
5104 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5105 * out-of-sync. Make sure to update the required fields
5106 * before using them.
5107 *
5108 * @remarks No-long-jump zone!!!
5109 */
5110static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5111{
5112 int rc = VINF_SUCCESS;
5113 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
5114 {
5115 uint32_t u32Val = 0;
5116 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
5117 pMixedCtx->SysEnter.cs = u32Val;
5118 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR;
5119 }
5120
5121 uint64_t u64Val = 0;
5122 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
5123 {
5124 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
5125 pMixedCtx->SysEnter.eip = u64Val;
5126 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR;
5127 }
5128 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
5129 {
5130 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
5131 pMixedCtx->SysEnter.esp = u64Val;
5132 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR;
5133 }
5134 return rc;
5135}
5136
5137
5138/**
5139 * Saves the guest FS_BASE MSR from the current VMCS into the guest-CPU
5140 * context.
5141 *
5142 * @returns VBox status code.
5143 * @param pVCpu Pointer to the VMCPU.
5144 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5145 * out-of-sync. Make sure to update the required fields
5146 * before using them.
5147 *
5148 * @remarks No-long-jump zone!!!
5149 */
5150static int hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5151{
5152 int rc = VINF_SUCCESS;
5153 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))
5154 {
5155 uint64_t u64Val = 0;
5156 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &u64Val); AssertRCReturn(rc, rc);
5157 pMixedCtx->fs.u64Base = u64Val;
5158 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_FS_BASE_MSR;
5159 }
5160 return rc;
5161}
5162
5163
5164/**
5165 * Saves the guest GS_BASE MSR from the current VMCS into the guest-CPU
5166 * context.
5167 *
5168 * @returns VBox status code.
5169 * @param pVCpu Pointer to the VMCPU.
5170 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5171 * out-of-sync. Make sure to update the required fields
5172 * before using them.
5173 *
5174 * @remarks No-long-jump zone!!!
5175 */
5176static int hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5177{
5178 int rc = VINF_SUCCESS;
5179 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))
5180 {
5181 uint64_t u64Val = 0;
5182 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &u64Val); AssertRCReturn(rc, rc);
5183 pMixedCtx->gs.u64Base = u64Val;
5184 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GS_BASE_MSR;
5185 }
5186 return rc;
5187}
5188
5189
5190/**
5191 * Saves the auto load/store'd guest MSRs from the current VMCS into the
5192 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE
5193 * and TSC_AUX.
5194 *
5195 * @returns VBox status code.
5196 * @param pVCpu Pointer to the VMCPU.
5197 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5198 * out-of-sync. Make sure to update the required fields
5199 * before using them.
5200 *
5201 * @remarks No-long-jump zone!!!
5202 */
5203static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5204{
5205 if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
5206 return VINF_SUCCESS;
5207
5208#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
5209 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
5210 {
5211 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
5212 pMsr += i;
5213 switch (pMsr->u32IndexMSR)
5214 {
5215 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
5216 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
5217 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
5218 case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
5219 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
5220 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;
5221 default:
5222 {
5223 AssertFailed();
5224 return VERR_HM_UNEXPECTED_LD_ST_MSR;
5225 }
5226 }
5227 }
5228#endif
5229
5230 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
5231 return VINF_SUCCESS;
5232}
5233
5234
5235/**
5236 * Saves the guest control registers from the current VMCS into the guest-CPU
5237 * context.
5238 *
5239 * @returns VBox status code.
5240 * @param pVCpu Pointer to the VMCPU.
5241 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5242 * out-of-sync. Make sure to update the required fields
5243 * before using them.
5244 *
5245 * @remarks No-long-jump zone!!!
5246 */
5247static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5248{
5249 /* Guest CR0. Guest FPU. */
5250 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5251 AssertRCReturn(rc, rc);
5252
5253 /* Guest CR4. */
5254 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
5255 AssertRCReturn(rc, rc);
5256
5257 /* Guest CR2 - always updated during the world-switch or in #PF. */
5258 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
5259 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR3))
5260 {
5261 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
5262 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4);
5263
5264 PVM pVM = pVCpu->CTX_SUFF(pVM);
5265 if ( pVM->hm.s.vmx.fUnrestrictedGuest
5266 || ( pVM->hm.s.fNestedPaging
5267 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
5268 {
5269 uint64_t u64Val = 0;
5270 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val); AssertRCReturn(rc, rc);
5271 if (pMixedCtx->cr3 != u64Val)
5272 {
5273 CPUMSetGuestCR3(pVCpu, u64Val);
5274 if (VMMRZCallRing3IsEnabled(pVCpu))
5275 {
5276 PGMUpdateCR3(pVCpu, u64Val);
5277 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5278 }
5279 else
5280 {
5281 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3(). */
5282 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
5283 }
5284 }
5285
5286 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
5287 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
5288 {
5289 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
5290 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
5291 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
5292 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
5293
5294 if (VMMRZCallRing3IsEnabled(pVCpu))
5295 {
5296 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5297 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5298 }
5299 else
5300 {
5301 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
5302 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
5303 }
5304 }
5305 }
5306
5307 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR3;
5308 }
5309
5310 /*
5311 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
5312 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
5313 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
5314 *
5315 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
5316 */
5317 if (VMMRZCallRing3IsEnabled(pVCpu))
5318 {
5319 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5320 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
5321
5322 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5323 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5324
5325 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5326 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5327 }
5328
5329 return rc;
5330}
5331
5332
5333/**
5334 * Reads a guest segment register from the current VMCS into the guest-CPU
5335 * context.
5336 *
5337 * @returns VBox status code.
5338 * @param pVCpu Pointer to the VMCPU.
5339 * @param idxSel Index of the selector in the VMCS.
5340 * @param idxLimit Index of the segment limit in the VMCS.
5341 * @param idxBase Index of the segment base in the VMCS.
5342 * @param idxAccess Index of the access rights of the segment in the VMCS.
5343 * @param pSelReg Pointer to the segment selector.
5344 *
5345 * @remarks No-long-jump zone!!!
5346 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
5347 * macro as that takes care of whether to read from the VMCS cache or
5348 * not.
5349 */
5350DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
5351 PCPUMSELREG pSelReg)
5352{
5353 uint32_t u32Val = 0;
5354 int rc = VMXReadVmcs32(idxSel, &u32Val);
5355 AssertRCReturn(rc, rc);
5356 pSelReg->Sel = (uint16_t)u32Val;
5357 pSelReg->ValidSel = (uint16_t)u32Val;
5358 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
5359
5360 rc = VMXReadVmcs32(idxLimit, &u32Val);
5361 AssertRCReturn(rc, rc);
5362 pSelReg->u32Limit = u32Val;
5363
5364 uint64_t u64Val = 0;
5365 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
5366 AssertRCReturn(rc, rc);
5367 pSelReg->u64Base = u64Val;
5368
5369 rc = VMXReadVmcs32(idxAccess, &u32Val);
5370 AssertRCReturn(rc, rc);
5371 pSelReg->Attr.u = u32Val;
5372
5373 /*
5374 * If VT-x marks the segment as unusable, the rest of the attributes are undefined with certain exceptions (some bits in
5375 * CS, SS). Regardless, we have to clear the bits here and only retain the unusable bit because the unusable bit is specific
5376 * to VT-x; everyone else relies on the attribute being zero and has no clue what the unusable bit is.
5377 *
5378 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
5379 */
5380 if (pSelReg->Attr.u & HMVMX_SEL_UNUSABLE)
5381 {
5382 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
5383 pSelReg->Attr.u = HMVMX_SEL_UNUSABLE;
5384 }
5385 return VINF_SUCCESS;
5386}
5387
5388
5389#ifdef VMX_USE_CACHED_VMCS_ACCESSES
5390#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5391 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5392 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5393#else
5394#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5395 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5396 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5397#endif
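/*
 * Expansion sketch: with VMX_USE_CACHED_VMCS_ACCESSES defined, VMXLOCAL_READ_SEG(CS, cs)
 * resolves to
 *
 *     hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT,
 *                           VMX_VMCS_GUEST_CS_BASE_CACHE_IDX, VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
 *                           &pMixedCtx->cs);
 *
 * i.e. the base comes from the VMCS read cache (see hmR0VmxInitVmcsReadCache) while the
 * selector, limit and access rights are read directly with VMREAD.
 */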
5398
5399
5400/**
5401 * Saves the guest segment registers from the current VMCS into the guest-CPU
5402 * context.
5403 *
5404 * @returns VBox status code.
5405 * @param pVCpu Pointer to the VMCPU.
5406 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5407 * out-of-sync. Make sure to update the required fields
5408 * before using them.
5409 *
5410 * @remarks No-long-jump zone!!!
5411 */
5412static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5413{
5414 /* Guest segment registers. */
5415 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SEGMENT_REGS))
5416 {
5417 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
5418 rc = VMXLOCAL_READ_SEG(CS, cs); AssertRCReturn(rc, rc);
5419 rc = VMXLOCAL_READ_SEG(SS, ss); AssertRCReturn(rc, rc);
5420 rc = VMXLOCAL_READ_SEG(DS, ds); AssertRCReturn(rc, rc);
5421 rc = VMXLOCAL_READ_SEG(ES, es); AssertRCReturn(rc, rc);
5422 rc = VMXLOCAL_READ_SEG(FS, fs); AssertRCReturn(rc, rc);
5423 rc = VMXLOCAL_READ_SEG(GS, gs); AssertRCReturn(rc, rc);
5424
5425 /* Restore segment attributes for real-on-v86 mode hack. */
5426 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5427 {
5428 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrCS.u;
5429 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrSS.u;
5430 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrDS.u;
5431 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrES.u;
5432 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrFS.u;
5433 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrGS.u;
5434 }
5435 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SEGMENT_REGS;
5436 }
5437
5438 return VINF_SUCCESS;
5439}
5440
5441
5442/**
5443 * Saves the guest descriptor table registers and task register from the current
5444 * VMCS into the guest-CPU context.
5445 *
5446 * @returns VBox status code.
5447 * @param pVCpu Pointer to the VMCPU.
5448 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5449 * out-of-sync. Make sure to update the required fields
5450 * before using them.
5451 *
5452 * @remarks No-long-jump zone!!!
5453 */
5454static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5455{
5456 int rc = VINF_SUCCESS;
5457
5458 /* Guest LDTR. */
5459 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LDTR))
5460 {
5461 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
5462 AssertRCReturn(rc, rc);
5463 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LDTR;
5464 }
5465
5466 /* Guest GDTR. */
5467 uint64_t u64Val = 0;
5468 uint32_t u32Val = 0;
5469 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
5470 {
5471 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
5472 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5473 pMixedCtx->gdtr.pGdt = u64Val;
5474 pMixedCtx->gdtr.cbGdt = u32Val;
5475 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GDTR;
5476 }
5477
5478 /* Guest IDTR. */
5479 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
5480 {
5481 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
5482 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5483 pMixedCtx->idtr.pIdt = u64Val;
5484 pMixedCtx->idtr.cbIdt = u32Val;
5485 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_IDTR;
5486 }
5487
5488 /* Guest TR. */
5489 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_TR))
5490 {
5491 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5492 AssertRCReturn(rc, rc);
5493
5494 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
5495 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5496 {
5497 rc = VMXLOCAL_READ_SEG(TR, tr);
5498 AssertRCReturn(rc, rc);
5499 }
5500 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_TR;
5501 }
5502 return rc;
5503}
5504
5505#undef VMXLOCAL_READ_SEG
5506
5507
5508/**
5509 * Saves the guest debug registers from the current VMCS into the guest-CPU
5510 * context.
5511 *
5512 * @returns VBox status code.
5513 * @param pVCpu Pointer to the VMCPU.
5514 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5515 * out-of-sync. Make sure to update the required fields
5516 * before using them.
5517 *
5518 * @remarks No-long-jump zone!!!
5519 */
5520static int hmR0VmxSaveGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5521{
5522 int rc = VINF_SUCCESS;
5523 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
5524 {
5525 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
5526 uint32_t u32Val;
5527 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
5528 pMixedCtx->dr[7] = u32Val;
5529
5530 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
5531 }
5532 return rc;
5533}
5534
5535
5536/**
5537 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
5538 *
5539 * @returns VBox status code.
5540 * @param pVCpu Pointer to the VMCPU.
5541 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5542 * out-of-sync. Make sure to update the required fields
5543 * before using them.
5544 *
5545 * @remarks No-long-jump zone!!!
5546 */
5547static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5548{
5549 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
5550 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE;
5551 return VINF_SUCCESS;
5552}
5553
5554
5555/**
5556 * Saves the entire guest state from the currently active VMCS into the
5557 * guest-CPU context. This essentially VMREADs all guest data.
5558 *
5559 * @returns VBox status code.
5560 * @param pVCpu Pointer to the VMCPU.
5561 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5562 * out-of-sync. Make sure to update the required fields
5563 * before using them.
5564 */
5565static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5566{
5567 Assert(pVCpu);
5568 Assert(pMixedCtx);
5569
5570 if (pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL)
5571 return VINF_SUCCESS;
5572
5573 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled again on the ring-3 callback path,
5574 there is no real need to. */
5575 if (VMMRZCallRing3IsEnabled(pVCpu))
5576 VMMR0LogFlushDisable(pVCpu);
5577 else
5578 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5579 Log4Func(("\n"));
5580
5581 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
5582 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5583
5584 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5585 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5586
5587 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
5588 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5589
5590 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
5591 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5592
5593 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
5594 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5595
5596 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
5597 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5598
5599 rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);
5600 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5601
5602 rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);
5603 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5604
5605 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
5606 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5607
5608 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
5609 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5610
5611 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
5612 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5613
5614 AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL,
5615 ("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
5616
5617 if (VMMRZCallRing3IsEnabled(pVCpu))
5618 VMMR0LogFlushEnable(pVCpu);
5619
5620 return rc;
5621}
5622
5623
5624/**
5625 * Checks per-VM and per-VCPU force flag actions that require us to go back to
5626 * ring-3 for one reason or another.
5627 *
5628 * @returns VBox status code (informational status codes included).
5629 * @retval VINF_SUCCESS if we don't have any actions that require going back to
5630 * ring-3.
5631 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
5632 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
5633 * interrupts)
5634 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
5635 * all EMTs to be in ring-3.
5636 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
5637 * @retval VINF_EM_NO_MEMORY if PGM is out of memory; we need to return
5638 * to the EM loop.
5639 *
5640 * @param pVM Pointer to the VM.
5641 * @param pVCpu Pointer to the VMCPU.
5642 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5643 * out-of-sync. Make sure to update the required fields
5644 * before using them.
5645 */
5646static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5647{
5648 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5649
5650 int rc = VERR_INTERNAL_ERROR_5;
5651 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
5652 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
5653 | VMCPU_FF_REQUEST | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES))
5654 {
5655 /* We need the control registers now, make sure the guest-CPU context is updated. */
5656 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5657 AssertRCReturn(rc, rc);
5658
5659 /* Pending HM CR3 sync. */
5660 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5661 {
5662 rc = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
5663 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
5664 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5665 }
5666
5667 /* Pending HM PAE PDPEs. */
5668 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5669 {
5670 rc = PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5671 AssertRC(rc);
5672 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5673 }
5674
5675 /* Pending PGM CR3 sync. */
5676 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
5677 {
5678 rc = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
5679 if (rc != VINF_SUCCESS)
5680 {
5681 AssertRC(rc);
5682 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
5683 return rc;
5684 }
5685 }
5686
5687 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
5688 /* -XXX- what was that about single stepping? */
5689 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
5690 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
5691 {
5692 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
5693 rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
5694 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
5695 return rc;
5696 }
5697
5698 /* Pending VM request packets, such as hardware interrupts. */
5699 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
5700 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
5701 {
5702 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
5703 return VINF_EM_PENDING_REQUEST;
5704 }
5705
5706 /* Pending PGM pool flushes. */
5707 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
5708 {
5709 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
5710 return VINF_PGM_POOL_FLUSH_PENDING;
5711 }
5712
5713 /* Pending DMA requests. */
5714 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
5715 {
5716 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
5717 return VINF_EM_RAW_TO_R3;
5718 }
5719 }
5720
5721 /* Paranoia. */
5722 Assert(rc != VERR_EM_INTERPRETER);
5723 return VINF_SUCCESS;
5724}
5725
5726
5727/**
5728 * Converts any TRPM trap into a pending VMX event. This is typically used when
5729 * entering from ring-3 (not longjmp returns).
5730 *
5731 * @param pVCpu Pointer to the VMCPU.
5732 */
5733static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
5734{
5735 Assert(TRPMHasTrap(pVCpu));
5736 Assert(!pVCpu->hm.s.Event.fPending);
5737
5738 uint8_t uVector;
5739 TRPMEVENT enmTrpmEvent;
5740 RTGCUINT uErrCode;
5741 RTGCUINTPTR GCPtrFaultAddress;
5742 uint8_t cbInstr;
5743
5744 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
5745 AssertRC(rc);
5746
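    /*
     * Map the TRPM event type onto the VT-x interruption-information type:
     *   TRPM_TRAP         -> hardware exception (except #BP/#OF which are software exceptions),
     *   TRPM_HARDWARE_INT -> external interrupt (or NMI for vector 2),
     *   TRPM_SOFTWARE_INT -> software interrupt.
     * An error code is only marked valid for the hardware exceptions that define one (#PF, #DF, #TS, #NP, #SS, #GP, #AC).
     */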
5747 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntrInfo. */
5748 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
5749 if (enmTrpmEvent == TRPM_TRAP)
5750 {
5751 switch (uVector)
5752 {
5753 case X86_XCPT_BP:
5754 case X86_XCPT_OF:
5755 {
5756 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5757 break;
5758 }
5759
5760 case X86_XCPT_PF:
5761 case X86_XCPT_DF:
5762 case X86_XCPT_TS:
5763 case X86_XCPT_NP:
5764 case X86_XCPT_SS:
5765 case X86_XCPT_GP:
5766 case X86_XCPT_AC:
5767 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5768 /* no break! */
5769 default:
5770 {
5771 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5772 break;
5773 }
5774 }
5775 }
5776 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
5777 {
5778 if (uVector == X86_XCPT_NMI)
5779 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5780 else
5781 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5782 }
5783 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
5784 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5785 else
5786 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
5787
5788 rc = TRPMResetTrap(pVCpu);
5789 AssertRC(rc);
5790 Log4(("TRPM->HM event: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
5791 u32IntrInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
5792 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, uErrCode, GCPtrFaultAddress);
5793}
5794
5795
5796/**
5797 * Converts any pending VMX event into a TRPM trap. Typically used when leaving
5798 * VT-x to execute any instruction.
5799 *
5800 * @param pVCpu Pointer to the VMCPU.
5801 */
5802static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
5803{
5804 Assert(pVCpu->hm.s.Event.fPending);
5805
5806 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
5807 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo);
5808 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo);
5809 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
5810
5811 /* If a trap was already pending, we did something wrong! */
5812 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
5813
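    /*
     * Translate the VT-x interruption type back into a TRPM event type. Note that NMIs are handed to
     * TRPM as hardware interrupts and that software and privileged software exceptions become TRPM_TRAP.
     */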
5814 TRPMEVENT enmTrapType;
5815 switch (uVectorType)
5816 {
5817 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
5818 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
5819 enmTrapType = TRPM_HARDWARE_INT;
5820 break;
5821 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
5822 enmTrapType = TRPM_SOFTWARE_INT;
5823 break;
5824 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
5825 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
5826 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
5827 enmTrapType = TRPM_TRAP;
5828 break;
5829 default:
5830 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
5831 enmTrapType = TRPM_32BIT_HACK;
5832 break;
5833 }
5834
5835 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
5836
5837 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
5838 AssertRC(rc);
5839
5840 if (fErrorCodeValid)
5841 TRPMSetErrorCode(pVCpu, uErrorCode);
5842
5843 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5844 && uVector == X86_XCPT_PF)
5845 {
5846 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
5847 }
5848 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5849 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5850 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5851 {
5852 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5853 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
5854 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
5855 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
5856 }
5857 pVCpu->hm.s.Event.fPending = false;
5858}
5859
5860
5861/**
5862 * Does the necessary state syncing before doing a longjmp to ring-3.
5863 *
5864 * @param pVM Pointer to the VM.
5865 * @param pVCpu Pointer to the VMCPU.
5866 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5867 * out-of-sync. Make sure to update the required fields
5868 * before using them.
5869 * @param rcExit The reason for exiting to ring-3. Can be
5870 * VINF_VMM_UNKNOWN_RING3_CALL.
5871 *
5872 * @remarks No-long-jmp zone!!!
5873 */
5874static void hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5875{
5876 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
5877 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5878
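    /*
     * Pull the entire guest state out of the VMCS into pMixedCtx; ring-3 only ever works on the
     * guest-CPU context and cannot read the VMCS itself.
     */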
5879 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
5880 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
5881 AssertRC(rc);
5882
5883    /* Restore host FPU state if necessary and resync on next R0 reentry. */
5884 if (CPUMIsGuestFPUStateActive(pVCpu))
5885 {
5886 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
5887 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
5888 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
5889 }
5890
5891 /* Restore host debug registers if necessary and resync on next R0 reentry. */
5892 if (CPUMIsGuestDebugStateActive(pVCpu))
5893 {
5894 CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
5895 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
5896 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
5897 }
5898 else if (CPUMIsHyperDebugStateActive(pVCpu))
5899 {
5900 CPUMR0LoadHostDebugState(pVM, pVCpu);
5901 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
5902 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
5903 }
5904
5905 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
5906 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
5907 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
5908 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
5909 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
5910 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
5911 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
5912 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5913 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
5914}
5915
5916
5917/**
5918 * An action requires us to go back to ring-3. This function does the necessary
5919 * steps before we can safely return to ring-3. This is not the same as a longjmp
5920 * to ring-3; this exit is voluntary.
5921 *
5922 * @param pVM Pointer to the VM.
5923 * @param pVCpu Pointer to the VMCPU.
5924 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5925 * out-of-sync. Make sure to update the required fields
5926 * before using them.
5927 * @param rcExit The reason for exiting to ring-3. Can be
5928 * VINF_VMM_UNKNOWN_RING3_CALL.
5929 */
5930static void hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5931{
5932 Assert(pVM);
5933 Assert(pVCpu);
5934 Assert(pMixedCtx);
5935 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5936
5937 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
5938 {
5939 /* We want to see what the guest-state was before VM-entry, don't resync here, as we won't continue guest execution. */
5940 return;
5941 }
5942 else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
5943 {
5944 VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys);
5945 pVCpu->hm.s.vmx.lasterror.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
5946 pVCpu->hm.s.vmx.lasterror.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
5947 pVCpu->hm.s.vmx.lasterror.idCurrentCpu = RTMpCpuId();
5948 return;
5949 }
5950
5951    /* Please, no longjmps here (any log flush shouldn't jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
5952 VMMRZCallRing3Disable(pVCpu);
5953 Log4(("hmR0VmxExitToRing3: rcExit=%d\n", rcExit));
5954
5955 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
5956 if (pVCpu->hm.s.Event.fPending)
5957 {
5958 hmR0VmxPendingEventToTrpmTrap(pVCpu);
5959 Assert(!pVCpu->hm.s.Event.fPending);
5960 }
5961
5962 /* Sync. the guest state. */
5963 hmR0VmxLongJmpToRing3(pVM, pVCpu, pMixedCtx, rcExit);
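    /* hmR0VmxLongJmpToRing3() bumps StatSwitchLongJmpToR3; this is a voluntary exit, so undo that here and count it
       as StatSwitchExitToR3 below instead. */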
5964 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5965
5966 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
5967 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
5968 | CPUM_CHANGED_LDTR
5969 | CPUM_CHANGED_GDTR
5970 | CPUM_CHANGED_IDTR
5971 | CPUM_CHANGED_TR
5972 | CPUM_CHANGED_HIDDEN_SEL_REGS);
5973
5974 /* On our way back from ring-3 the following needs to be done. */
5975 /** @todo This can change with preemption hooks. */
5976 if (rcExit == VINF_EM_RAW_INTERRUPT)
5977 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
5978 else
5979 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
5980
5981 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
5982 VMMRZCallRing3Enable(pVCpu);
5983}
5984
5985
5986/**
5987 * VMMRZCallRing3() callback wrapper which saves the guest state before we
5988 * longjump to ring-3 and possibly get preempted.
5989 *
5990 * @param pVCpu Pointer to the VMCPU.
5991 * @param enmOperation The operation causing the ring-3 longjump.
5992 * @param pvUser The user argument (pointer to the possibly
5993 * out-of-date guest-CPU context).
5994 *
5995 * @remarks Must never be called with @a enmOperation ==
5996 * VMMCALLRING3_VM_R0_ASSERTION.
5997 */
5998DECLCALLBACK(void) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
5999{
6000    /* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
6001 Assert(pVCpu);
6002 Assert(pvUser);
6003 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6004 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6005
6006 VMMRZCallRing3Disable(pVCpu);
6007 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6008 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3\n"));
6009 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
6010 VMMRZCallRing3Enable(pVCpu);
6011}
6012
6013
6014/**
6015 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
6016 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
6017 *
6018 * @param pVCpu Pointer to the VMCPU.
6019 */
6020DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
6021{
6022 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
6023 {
6024 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
6025 {
6026 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
6027 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
6028 AssertRC(rc);
6029 }
6030 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
6031}
6032
6033
6034/**
6035 * Injects any pending events into the guest if the guest is in a state to
6036 * receive them.
6037 *
6038 * @returns VBox status code (informational status codes included).
6039 * @param pVCpu Pointer to the VMCPU.
6040 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6041 * out-of-sync. Make sure to update the required fields
6042 * before using them.
6043 */
6044static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6045{
6046 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
6047 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
6048 bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6049 bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6050
6051 Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
6052 Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet.*/
6053 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
6054 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
6055 Assert(!TRPMHasTrap(pVCpu));
6056
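    /*
     * Evaluate in priority order: a previously pending HM event is (re-)injected first, then NMIs
     * (VMCPU_FF_INTERRUPT_NMI), then external interrupts (VMCPU_FF_INTERRUPT_APIC/PIC). If the guest cannot
     * take the event right now, request an interrupt-window exit instead so we get back here as soon as it can.
     */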
6057 int rc = VINF_SUCCESS;
6058 if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
6059 {
6060 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
6061 bool fInject = true;
6062 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
6063 {
6064 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6065 AssertRCReturn(rc, rc);
6066 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
6067 if ( fBlockInt
6068 || fBlockSti
6069 || fBlockMovSS)
6070 {
6071 fInject = false;
6072 }
6073 }
6074 else if ( uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6075 && ( fBlockMovSS
6076 || fBlockSti))
6077 {
6078 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
6079 fInject = false;
6080 }
6081
6082 if (fInject)
6083 {
6084 Log4(("Injecting pending event\n"));
6085 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
6086 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
6087 AssertRCReturn(rc, rc);
6088 pVCpu->hm.s.Event.fPending = false;
6089 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject);
6090 }
6091 else
6092 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6093 } /** @todo SMI. SMIs take priority over NMIs. */
6094    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))        /* NMI. NMIs take priority over regular interrupts. */
6095 {
6096 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
6097 if ( !fBlockMovSS
6098 && !fBlockSti)
6099 {
6100 Log4(("Injecting NMI\n"));
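            /* VM-entry interruption-info layout (Intel spec. 24.8.3): bits 7:0 = vector, bits 10:8 = type,
               bit 11 = deliver-error-code, bit 31 = valid. For an NMI: vector 2, type NMI, no error code. */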
6101 uint32_t u32IntrInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
6102 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6103 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
6104 0 /* GCPtrFaultAddress */, &uIntrState);
6105 AssertRCReturn(rc, rc);
6106 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
6107 }
6108 else
6109 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6110 }
6111 else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
6112 {
6113 /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
6114 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6115 AssertRCReturn(rc, rc);
6116 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
6117 if ( !fBlockInt
6118 && !fBlockSti
6119 && !fBlockMovSS)
6120 {
6121 uint8_t u8Interrupt;
6122 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
6123 if (RT_SUCCESS(rc))
6124 {
6125 Log4(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
6126 uint32_t u32IntrInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
6127 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6128 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
6129 0 /* GCPtrFaultAddress */, &uIntrState);
6130 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
6131 }
6132 else
6133 {
6134 /** @todo Does this actually happen? If not turn it into an assertion. */
6135 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
6136 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
6137 rc = VINF_SUCCESS;
6138 }
6139 }
6140 else
6141 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6142 }
6143
6144 /*
6145     * Deliver a pending debug exception if the guest is single-stepping. The interruptibility-state could have been changed by
6146 * hmR0VmxInjectEventVmcs() (e.g. real-on-v86 injecting software interrupts), re-evaluate it and set the BS bit.
6147 */
6148 fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6149 fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6150 int rc2 = VINF_SUCCESS;
6151 if ( fBlockSti
6152 || fBlockMovSS)
6153 {
6154 if (!DBGFIsStepping(pVCpu))
6155 {
6156 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
6157 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
6158 {
6159 /*
6160                 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD, VMX_EXIT_MTF,
6161 * VMX_EXIT_APIC_WRITE, VMX_EXIT_VIRTUALIZED_EOI. See Intel spec. 27.3.4 "Saving Non-Register State".
6162 */
6163 rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
6164                AssertRCReturn(rc2, rc2);
6165 }
6166 }
6167 else
6168 {
6169 /* We are single-stepping in the hypervisor debugger, clear interrupt inhibition as setting the BS bit would mean
6170 delivering a #DB to the guest upon VM-entry when it shouldn't be. */
6171 uIntrState = 0;
6172 }
6173 }
6174
6175 /*
6176 * There's no need to clear the VM entry-interruption information field here if we're not injecting anything.
6177 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6178 */
6179 rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
6180 AssertRC(rc2);
6181
6182 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6183 return rc;
6184}
6185
6186
6187/**
6188 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
6189 *
6190 * @param pVCpu Pointer to the VMCPU.
6191 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6192 * out-of-sync. Make sure to update the required fields
6193 * before using them.
6194 */
6195DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6196{
6197 uint32_t u32IntrInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
6198 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6199}
6200
6201
6202/**
6203 * Injects a double-fault (#DF) exception into the VM.
6204 *
6205 * @returns VBox status code (informational status code included).
6206 * @param pVCpu Pointer to the VMCPU.
6207 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6208 * out-of-sync. Make sure to update the required fields
6209 * before using them.
6210 */
6211DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
6212{
6213 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6214 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6215 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6216 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
6217 puIntrState);
6218}
6219
6220
6221/**
6222 * Sets a debug (#DB) exception as pending-for-injection into the VM.
6223 *
6224 * @param pVCpu Pointer to the VMCPU.
6225 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6226 * out-of-sync. Make sure to update the required fields
6227 * before using them.
6228 */
6229DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6230{
6231 uint32_t u32IntrInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
6232 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6233 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6234}
6235
6236
6237/**
6238 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
6239 *
6240 * @param pVCpu Pointer to the VMCPU.
6241 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6242 * out-of-sync. Make sure to update the required fields
6243 * before using them.
6244 * @param cbInstr The instruction length in bytes (used to compute the return
6245 * RIP that is pushed on the guest stack).
6246 */
6247DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
6248{
6249 uint32_t u32IntrInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6250 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6251 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6252}
6253
6254
6255/**
6256 * Injects a general-protection (#GP) fault into the VM.
6257 *
6258 * @returns VBox status code (informational status code included).
6259 * @param pVCpu Pointer to the VMCPU.
6260 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6261 * out-of-sync. Make sure to update the required fields
6262 * before using them.
6263 * @param u32ErrorCode The error code associated with the #GP.
6264 */
6265DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
6266 uint32_t *puIntrState)
6267{
6268 uint32_t u32IntrInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
6269 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6270 if (fErrorCodeValid)
6271 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6272 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
6273 puIntrState);
6274}
6275
6276
6277/**
6278 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
6279 *
6280 * @param pVCpu Pointer to the VMCPU.
6281 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6282 * out-of-sync. Make sure to update the required fields
6283 * before using them.
6284 * @param uVector The software interrupt vector number.
6285 * @param cbInstr The instruction length in bytes (used to compute the return
6286 * RIP that is pushed on the guest stack).
6287 */
6288DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
6289{
6290 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6291 if ( uVector == X86_XCPT_BP
6292 || uVector == X86_XCPT_OF)
6293 {
6294 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6295 }
6296 else
6297 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6298 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6299}
6300
6301
6302/**
6303 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
6304 * stack.
6305 *
6306 * @returns VBox status code (informational status code included).
6307 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
6308 * @param pVM Pointer to the VM.
6309 * @param pMixedCtx Pointer to the guest-CPU context.
6310 * @param uValue The value to push to the guest stack.
6311 */
6312DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
6313{
6314 /*
6315 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
6316 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
6317 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
6318 */
6319 if (pMixedCtx->sp == 1)
6320 return VINF_EM_RESET;
6321 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
6322 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
6323 AssertRCReturn(rc, rc);
6324 return rc;
6325}
6326
6327
6328/**
6329 * Injects an event into the guest upon VM-entry by updating the relevant fields
6330 * in the VM-entry area in the VMCS.
6331 *
6332 * @returns VBox status code (informational error codes included).
6333 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
6334 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
6335 *
6336 * @param pVCpu Pointer to the VMCPU.
6337 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6338 * be out-of-sync. Make sure to update the required
6339 * fields before using them.
6340 * @param u64IntrInfo The VM-entry interruption-information field.
6341 * @param cbInstr The VM-entry instruction length in bytes (for
6342 * software interrupts, exceptions and privileged
6343 * software exceptions).
6344 * @param u32ErrCode The VM-entry exception error code.
6345 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
6346 * @param puIntrState Pointer to the current guest interruptibility-state.
6347 * This interruptibility-state will be updated if
6348 * necessary. This cannot be NULL.
6349 *
6350 * @remarks No-long-jump zone!!!
6351 * @remarks Requires CR0!
6352 */
6353static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
6354 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
6355{
6356 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
6357 AssertMsg(u64IntrInfo >> 32 == 0, ("%#RX64\n", u64IntrInfo));
6358 Assert(puIntrState);
6359 uint32_t u32IntrInfo = (uint32_t)u64IntrInfo;
6360
6361 const uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntrInfo);
6362 const uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo);
6363
6364 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
6365 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6366 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
6367
6368 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
6369
6370 /* We require CR0 to check if the guest is in real-mode. */
6371 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6372 AssertRCReturn(rc, rc);
6373
6374 /*
6375 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
6376 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
6377 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
6378 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
6379 */
6380 if (CPUMIsGuestInRealModeEx(pMixedCtx))
6381 {
6382 PVM pVM = pVCpu->CTX_SUFF(pVM);
6383 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
6384 {
6385 Assert(PDMVmmDevHeapIsEnabled(pVM));
6386 Assert(pVM->hm.s.vmx.pRealModeTSS);
6387
6388 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
6389 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6390 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6391 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6392 AssertRCReturn(rc, rc);
6393 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP);
6394
6395 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
6396 const size_t cbIdtEntry = 4;
6397 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
6398 {
6399 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
6400 if (uVector == X86_XCPT_DF)
6401 return VINF_EM_RESET;
6402 else if (uVector == X86_XCPT_GP)
6403 {
6404 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
6405 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
6406 }
6407
6408 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
6409 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
6410 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
6411 }
6412
6413 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
6414 uint16_t uGuestIp = pMixedCtx->ip;
6415 if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
6416 {
6417 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
6418 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
6419 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6420 }
6421 else if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
6422 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6423
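            /* Real-mode IVT entries are 4 bytes each: a 16-bit handler offset at +0 followed by a 16-bit segment selector at +2. */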
6424 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
6425 uint16_t offIdtEntry = 0;
6426 RTSEL selIdtEntry = 0;
6427 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
6428 rc = PGMPhysSimpleReadGCPhys(pVM, &offIdtEntry, GCPhysIdtEntry, sizeof(offIdtEntry));
6429 rc |= PGMPhysSimpleReadGCPhys(pVM, &selIdtEntry, GCPhysIdtEntry + 2, sizeof(selIdtEntry));
6430 AssertRCReturn(rc, rc);
6431
6432 /* Construct the stack frame for the interrupt/exception handler. */
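            /* Push in the same order a real-mode INT would: FLAGS first, then CS, then the return IP. */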
6433 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
6434 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
6435 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
6436 AssertRCReturn(rc, rc);
6437
6438 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
6439 if (rc == VINF_SUCCESS)
6440 {
6441 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
6442 pMixedCtx->rip = offIdtEntry;
6443 pMixedCtx->cs.Sel = selIdtEntry;
6444 pMixedCtx->cs.u64Base = selIdtEntry << cbIdtEntry;
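                /* Real-mode segment base = selector * 16; the shift count of 4 coincides with cbIdtEntry. */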
6445 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6446 && uVector == X86_XCPT_PF)
6447 {
6448 pMixedCtx->cr2 = GCPtrFaultAddress;
6449 }
6450 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS
6451 | HM_CHANGED_GUEST_RIP
6452 | HM_CHANGED_GUEST_RFLAGS
6453 | HM_CHANGED_GUEST_RSP;
6454
6455 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
6456 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
6457 {
6458 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6459 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6460 Log4(("Clearing inhibition due to STI.\n"));
6461 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
6462 }
6463 Log4(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
6464 }
6465 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6466 return rc;
6467 }
6468 else
6469 {
6470 /*
6471             * When executing a real-mode guest with unrestricted guest execution enabled, we must not set the deliver-error-code bit.
6472 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6473 */
6474 u32IntrInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6475 }
6476 }
6477
6478 /* Validate. */
6479 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntrInfo)); /* Bit 31 (Valid bit) must be set by caller. */
6480 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntrInfo)); /* Bit 12 MBZ. */
6481 Assert(!(u32IntrInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
6482
6483 /* Inject. */
6484 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo);
6485 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo))
6486 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
6487 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
6488
6489 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6490 && uVector == X86_XCPT_PF)
6491 {
6492 pMixedCtx->cr2 = GCPtrFaultAddress;
6493 }
6494
6495 Log4(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", u32IntrInfo, u32ErrCode, cbInstr,
6496 pMixedCtx->cr2));
6497
6498 AssertRCReturn(rc, rc);
6499 return rc;
6500}
6501
6502
6503/**
6504 * Enters the VT-x session.
6505 *
6506 * @returns VBox status code.
6507 * @param pVM Pointer to the VM.
6508 * @param pVCpu Pointer to the VMCPU.
6509 * @param pCpu Pointer to the CPU info struct.
6510 */
6511VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
6512{
6513 AssertPtr(pVM);
6514 AssertPtr(pVCpu);
6515 Assert(pVM->hm.s.vmx.fSupported);
6516 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6517 NOREF(pCpu);
6518
6519 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6520
6521 /* Make sure we're in VMX root mode. */
6522 RTCCUINTREG u32HostCR4 = ASMGetCR4();
6523 if (!(u32HostCR4 & X86_CR4_VMXE))
6524 {
6525 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
6526 return VERR_VMX_X86_CR4_VMXE_CLEARED;
6527 }
6528
6529 /* Load the active VMCS as the current one. */
6530 int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6531 if (RT_FAILURE(rc))
6532 return rc;
6533
6534    /** @todo this will change with preemption hooks where we can VMRESUME as long
6535     *        as we're not preempted. */
6536 pVCpu->hm.s.fResumeVM = false;
6537 return VINF_SUCCESS;
6538}
6539
6540
6541/**
6542 * Leaves the VT-x session.
6543 *
6544 * @returns VBox status code.
6545 * @param pVM Pointer to the VM.
6546 * @param pVCpu Pointer to the VMCPU.
6547 * @param pCtx Pointer to the guest-CPU context.
6548 */
6549VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6550{
6551 AssertPtr(pVCpu);
6552 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6553 NOREF(pVM);
6554 NOREF(pCtx);
6555
6556 /** @todo this will change with preemption hooks where we only VMCLEAR when
6557 * we are actually going to be preempted, not all the time like we
6558 * currently do. */
6559
6560 /* Restore host-state bits that VT-x only restores partially. */
6561 if (pVCpu->hm.s.vmx.fRestoreHostFlags)
6562 {
6563#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6564 /** @todo r=ramshankar: This is broken when
6565         *        VBOX_WITH_VMMR0_DISABLE_PREEMPTION is not defined, as
6566         *        VMXRestoreHostState() may unconditionally enable interrupts. */
6567#error "VMM: Fix Me! Make VMXRestoreHostState() function to skip cli/sti."
6568#else
6569 Assert(ASMIntAreEnabled());
6570 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
6571#endif
6572 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
6573 }
6574
6575 /*
6576     * Sync the current VMCS (writes the internal data back into the VMCS region in memory)
6577 * and mark the VMCS launch-state as "clear".
6578 */
6579 int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6580 return rc;
6581}
6582
6583
6584/**
6585 * Saves the host state in the VMCS host-state.
6586 * Sets up the VM-exit MSR-load area.
6587 *
6588 * The CPU state will be loaded from these fields on every successful VM-exit.
6589 *
6590 * @returns VBox status code.
6591 * @param pVM Pointer to the VM.
6592 * @param pVCpu Pointer to the VMCPU.
6593 *
6594 * @remarks No-long-jump zone!!!
6595 */
6596VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
6597{
6598 AssertPtr(pVM);
6599 AssertPtr(pVCpu);
6600 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6601
6602 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6603
6604 /* Nothing to do if the host-state-changed flag isn't set. This will later be optimized when preemption hooks are in place. */
6605 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
6606 return VINF_SUCCESS;
6607
6608 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
6609 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6610
6611 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
6612 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6613
6614 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
6615 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6616
6617 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
6618 return rc;
6619}
6620
6621
6622/**
6623 * Loads the guest state into the VMCS guest-state area. The CPU state will be
6624 * loaded from these fields on every successful VM-entry.
6625 *
6626 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
6627 * Sets up the VM-entry controls.
6628 * Sets up the appropriate VMX non-root function to execute guest code based on
6629 * the guest CPU mode.
6630 *
6631 * @returns VBox status code.
6632 * @param pVM Pointer to the VM.
6633 * @param pVCpu Pointer to the VMCPU.
6634 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6635 * out-of-sync. Make sure to update the required fields
6636 * before using them.
6637 *
6638 * @remarks No-long-jump zone!!!
6639 */
6640VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6641{
6642 AssertPtr(pVM);
6643 AssertPtr(pVCpu);
6644 AssertPtr(pMixedCtx);
6645 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6646
6647 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6648
6649 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
6650
6651 /* Determine real-on-v86 mode. */
6652 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
6653 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
6654 && CPUMIsGuestInRealModeEx(pMixedCtx))
6655 {
6656 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
6657 }
6658
6659 /*
6660 * Load the guest-state into the VMCS.
6661 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
6662 * Ideally, assert that the cross-dependent bits are up to date at the point of using it.
6663 */
6664 int rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
6665 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6666
6667 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
6668 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6669
6670 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
6671 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6672
6673 rc = hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
6674 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6675
6676 /* Must be done after CR0 is loaded (strict builds require CR0 for segment register validation checks). */
6677 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
6678 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6679
6680 rc = hmR0VmxLoadGuestDebugRegs(pVCpu, pMixedCtx);
6681 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6682
6683 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
6684 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6685
6686 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
6687 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6688
6689 /* Must be done after hmR0VmxLoadGuestDebugRegs() as it may update eflags.TF for debugging purposes. */
6690 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
6691 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestGprs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6692
6693 rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
6694 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6695
6696 /* Clear any unused and reserved bits. */
6697 pVCpu->hm.s.fContextUseFlags &= ~( HM_CHANGED_GUEST_CR2
6698 | HM_CHANGED_VMX_RESERVED1
6699 | HM_CHANGED_VMX_RESERVED2);
6700
6701 AssertMsg(!pVCpu->hm.s.fContextUseFlags,
6702 ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
6703 pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
6704
6705 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
6706 return rc;
6707}
6708
6709
6710/**
6711 * Does the preparations before executing guest code in VT-x.
6712 *
6713 * This may cause longjmps to ring-3 and may even result in rescheduling to the
6714 * recompiler. We must be cautious about what we do here regarding committing
6715 * guest-state information into the VMCS, assuming we will actually execute the
6716 * guest in VT-x. If we fall back to the recompiler after updating the VMCS and
6717 * clearing the common-state (TRPM/forceflags), we must undo those changes so
6718 * that the recompiler can (and should) use them when it resumes guest
6719 * execution. Otherwise such operations must be done when we can no longer
6720 * exit to ring-3.
6721 *
6722 * @returns VBox status code (informational status codes included).
6723 * @retval VINF_SUCCESS if we can proceed with running the guest.
6724 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a double-fault
6725 * into the guest.
6726 * @retval VINF_* scheduling changes, we have to go back to ring-3.
6727 *
6728 * @param pVM Pointer to the VM.
6729 * @param pVCpu Pointer to the VMCPU.
6730 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6731 * out-of-sync. Make sure to update the required fields
6732 * before using them.
6733 * @param pVmxTransient Pointer to the VMX transient structure.
6734 *
6735 * @remarks Called with preemption disabled.
6736 */
6737DECLINLINE(int) hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6738{
6739 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6740
6741#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
6742 PGMRZDynMapFlushAutoSet(pVCpu);
6743#endif
6744
6745 /* Check force flag actions that might require us to go back to ring-3. */
6746 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
6747 if (rc != VINF_SUCCESS)
6748 return rc;
6749
6750    /* Set up the virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
6751 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
6752 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
6753 {
6754 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
6755 RTGCPHYS GCPhysApicBase;
6756 GCPhysApicBase = pMixedCtx->msrApicBase;
6757 GCPhysApicBase &= PAGE_BASE_GC_MASK;
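        /* The low 12 bits of IA32_APIC_BASE are flags (BSP, x2APIC/APIC enable); mask them off to get the page-aligned base. */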
6758
6759 /* Unalias any existing mapping. */
6760 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
6761 AssertRCReturn(rc, rc);
6762
6763 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
6764        Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGp\n", GCPhysApicBase));
6765 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
6766 AssertRCReturn(rc, rc);
6767
6768 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
6769 }
6770
6771#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6772 /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
6773 pVmxTransient->uEFlags = ASMIntDisableFlags();
6774 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
6775 {
6776 ASMSetFlags(pVmxTransient->uEFlags);
6777 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
6778 /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
6779 return VINF_EM_RAW_INTERRUPT;
6780 }
6781 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
6782 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6783#endif
6784
6785 /*
6786 * Evaluates and injects any pending events, toggling force-flags and updating the guest-interruptibility
6787 * state (interrupt shadow) in the VMCS. This -can- potentially be reworked to be done before disabling
6788 * interrupts and handle returning to ring-3 afterwards, but requires very careful state restoration.
6789 */
6790 /** @todo Rework event evaluation and injection to be completely separate. */
6791 if (TRPMHasTrap(pVCpu))
6792 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
6793
6794 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
6795 AssertRCReturn(rc, rc);
6796 return rc;
6797}
6798
6799
6800/**
6801 * Prepares to run guest code in VT-x and we've committed to doing so. This
6802 * means there is no backing out to ring-3 or anywhere else at this
6803 * point.
6804 *
6805 * @param pVM Pointer to the VM.
6806 * @param pVCpu Pointer to the VMCPU.
6807 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6808 * out-of-sync. Make sure to update the required fields
6809 * before using them.
6810 * @param pVmxTransient Pointer to the VMX transient structure.
6811 *
6812 * @remarks Called with preemption disabled.
6813 * @remarks No-long-jump zone!!!
6814 */
6815DECLINLINE(void) hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6816{
6817 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6818 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6819
6820#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6821 /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
6822 pVmxTransient->uEFlags = ASMIntDisableFlags();
6823 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6824#endif
6825
6826 /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
6827 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
6828 Log5(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
6829#ifdef HMVMX_SYNC_FULL_GUEST_STATE
6830 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
6831#endif
6832 int rc = VINF_SUCCESS;
6833 if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
6834 {
6835 rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
6836 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
6837 }
6838 else if (pVCpu->hm.s.fContextUseFlags)
6839 {
6840 rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
6841 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
6842 }
6843 AssertRC(rc);
6844 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
6845
6846 /* Cache the TPR-shadow for checking on every VM-exit if it might have changed. */
6847 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
6848 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
6849
6850 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
6851 || HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
6852 {
6853 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
6854 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
6855 }
6856
6857 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
6858 hmR0VmxFlushTaggedTlb(pVCpu); /* Invalidate the appropriate guest entries from the TLB. */
6859 Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
6860
6861 /*
6862     * TPR patching (only active for 32-bit guests on 64-bit capable CPUs) when the CPU does not support the
6863     * virtualize-APIC-accesses feature (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).
6864 */
6865 if (pVM->hm.s.fTPRPatchingActive)
6866 {
6867 Assert(!CPUMIsGuestInLongMode(pVCpu));
6868
6869 /* Need guest's LSTAR MSR (which is part of the auto load/store MSRs in the VMCS), ensure we have the updated one. */
6870 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6871 AssertRC(rc);
6872
6873        /* The patch code uses LSTAR as it's not used by a guest in 32-bit mode (i.e. SYSCALL is 64-bit only). */
6874 pVmxTransient->u64LStarMsr = ASMRdMsr(MSR_K8_LSTAR);
6875 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR); /* pMixedCtx->msrLSTAR contains the guest's TPR,
6876 see hmR0VmxLoadGuestApicState(). */
6877 }
6878
6879#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
6880 /*
6881     * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that
6882     * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
6883 */
6884 if ( (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
6885 && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
6886 {
6887 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
6888        uint64_t u64GuestTscAux = 0;
6889        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
6890        AssertRC(rc2);
6891        ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
6892 }
6893#endif
6894
6895 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
6896 to start executing. */
6897 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
6898}
6899
6900
6901/**
6902 * Performs some essential restoration of state after running guest code in
6903 * VT-x.
6904 *
6905 * @param pVM Pointer to the VM.
6906 * @param pVCpu Pointer to the VMCPU.
6907 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6908 * out-of-sync. Make sure to update the required fields
6909 * before using them.
6910 * @param pVmxTransient Pointer to the VMX transient structure.
6911 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
6912 *
6913 * @remarks Called with interrupts disabled.
6914 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
6915 * unconditionally when it is safe to do so.
6916 */
6917DECLINLINE(void) hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
6918{
6919 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6920 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
6921
6922 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
6923 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
6924    pVCpu->hm.s.vmx.fUpdatedGuestState = 0;            /* Exits/longjmps to ring-3 require saving the guest state. */
6925 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
6926 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
6927
6928 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
6929 {
6930#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
6931 /* Restore host's TSC_AUX. */
6932 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
6933 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
6934#endif
6935 /** @todo Find a way to fix hardcoding a guestimate. */
6936 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
6937 + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
6938 }
6939
6940 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
6941 Assert(!(ASMGetFlags() & X86_EFL_IF));
6942 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
6943
6944 /* Restore the effects of TPR patching if any. */
6945 if (pVM->hm.s.fTPRPatchingActive)
6946 {
6947 int rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6948 AssertRC(rc);
6949 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); /* MSR_K8_LSTAR contains the guest TPR. */
6950 ASMWrMsr(MSR_K8_LSTAR, pVmxTransient->u64LStarMsr);
6951 }
6952
6953 ASMSetFlags(pVmxTransient->uEFlags); /* Enable interrupts. */
6954 pVCpu->hm.s.fResumeVM = true; /* Use VMRESUME instead of VMLAUNCH in the next run. */
6955
6956 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
6957 uint32_t uExitReason;
6958 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
6959 rc |= hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
6960 AssertRC(rc);
6961 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
6962 pVmxTransient->fVMEntryFailed = !!VMX_ENTRY_INTERRUPTION_INFO_VALID(pVmxTransient->uEntryIntrInfo);
6963
6964 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
6965 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
6966
6967 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
6968 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
6969 {
6970 Log4(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
6971 return;
6972 }
6973
6974 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
6975 {
6976 /* Update the guest interruptibility-state from the VMCS. */
6977 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
6978#if defined(HMVMX_SYNC_FULL_GUEST_STATE) || defined(HMVMX_SAVE_FULL_GUEST_STATE)
6979 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6980 AssertRC(rc);
6981#endif
6982 /*
6983 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
6984         * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
6985         * why it's done here; it's easier and no less efficient to deal with it here than making hmR0VmxSaveGuestState()
6986         * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
6987 */
6988 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
6989 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
6990 {
6991 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
6992 AssertRC(rc);
6993 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
6994 }
6995 }
6996}
6997
6998
6999/**
7000 * Runs the guest code using VT-x.
7001 *
7002 * @returns VBox status code.
7003 * @param pVM Pointer to the VM.
7004 * @param pVCpu Pointer to the VMCPU.
7005 * @param pCtx Pointer to the guest-CPU context.
7006 *
7007 * @remarks Called with preemption disabled.
7008 */
7009VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7010{
7011 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7012 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7013
7014 VMXTRANSIENT VmxTransient;
7015 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
7016 int rc = VERR_INTERNAL_ERROR_5;
7017 uint32_t cLoops = 0;
7018
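    /*
     * The inner execution loop: prepare (may return to ring-3), commit and disable longjmps, run the guest,
     * restore state and re-enable longjmps, then dispatch the VM-exit handler; repeat until a handler returns
     * a non-VINF_SUCCESS status or cMaxResumeLoops is reached.
     */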
7019 for (;; cLoops++)
7020 {
7021 Assert(!HMR0SuspendPending());
7022 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
7023 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
7024 (unsigned)RTMpCpuId(), cLoops));
7025
7026 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
7027 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
7028 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
7029 if (rc != VINF_SUCCESS)
7030 break;
7031
7032 /*
7033 * No longjmps to ring-3 from this point on!!!
7034 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
7035 * This also disables flushing of the R0-logger instance (if any).
7036 */
7037 VMMRZCallRing3Disable(pVCpu);
7038 VMMRZCallRing3RemoveNotification(pVCpu);
7039 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
7040
7041 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
7042 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
7043
7044 /*
7045 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
7046 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
7047 */
7048 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
7049 if (RT_UNLIKELY(rc != VINF_SUCCESS)) /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
7050 {
7051 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
7052 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
7053 return rc;
7054 }
7055
7056 /* Handle the VM-exit. */
7057 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
7058 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
7059 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
7060 HMVMX_START_EXIT_DISPATCH_PROF();
7061#ifdef HMVMX_USE_FUNCTION_TABLE
7062 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
7063#else
7064 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
7065#endif
7066 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
7067 if (rc != VINF_SUCCESS)
7068 break;
7069 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
7070 {
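            /* Guard against spending too long in ring-0: after cMaxResumeLoops VM-entries without returning to
               ring-3, bail out with VINF_EM_RAW_INTERRUPT. */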
7071 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
7072 rc = VINF_EM_RAW_INTERRUPT;
7073 break;
7074 }
7075 }
7076
7077 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
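    /* Convert interpreter failures into instruction-emulation requests and a VM-reset status into a triple fault
       before handing control back to ring-3. */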
7078 if (rc == VERR_EM_INTERPRETER)
7079 rc = VINF_EM_RAW_EMULATE_INSTR;
7080 else if (rc == VINF_EM_RESET)
7081 rc = VINF_EM_TRIPLE_FAULT;
7082 hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
7083 return rc;
7084}
7085
7086
7087#ifndef HMVMX_USE_FUNCTION_TABLE
7088DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
7089{
7090 int rc;
7091 switch (rcReason)
7092 {
7093 case VMX_EXIT_EPT_MISCONFIG: rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); break;
7094 case VMX_EXIT_EPT_VIOLATION: rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); break;
7095 case VMX_EXIT_IO_INSTR: rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); break;
7096 case VMX_EXIT_CPUID: rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); break;
7097 case VMX_EXIT_RDTSC: rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); break;
7098 case VMX_EXIT_RDTSCP: rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); break;
7099 case VMX_EXIT_APIC_ACCESS: rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7100 case VMX_EXIT_XCPT_NMI: rc = hmR0VmxExitXcptNmi(pVCpu, pMixedCtx, pVmxTransient); break;
7101 case VMX_EXIT_MOV_CRX: rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); break;
7102 case VMX_EXIT_EXT_INT: rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); break;
7103 case VMX_EXIT_INT_WINDOW: rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); break;
7104 case VMX_EXIT_MWAIT: rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); break;
7105 case VMX_EXIT_MONITOR: rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); break;
7106 case VMX_EXIT_TASK_SWITCH: rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); break;
7107 case VMX_EXIT_PREEMPT_TIMER: rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); break;
7108 case VMX_EXIT_RDMSR: rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); break;
7109 case VMX_EXIT_WRMSR: rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); break;
7110 case VMX_EXIT_MOV_DRX: rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); break;
7111 case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); break;
7112 case VMX_EXIT_HLT: rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); break;
7113 case VMX_EXIT_INVD: rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); break;
7114 case VMX_EXIT_INVLPG: rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); break;
7115 case VMX_EXIT_RSM: rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); break;
7116 case VMX_EXIT_MTF: rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); break;
7117 case VMX_EXIT_PAUSE: rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); break;
7118 case VMX_EXIT_XDTR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7119 case VMX_EXIT_TR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7120 case VMX_EXIT_WBINVD: rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); break;
7121 case VMX_EXIT_XSETBV: rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); break;
7122 case VMX_EXIT_RDRAND: rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); break;
7123 case VMX_EXIT_INVPCID: rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); break;
7124 case VMX_EXIT_GETSEC: rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); break;
7125 case VMX_EXIT_RDPMC: rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); break;
7126
7127 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
7128 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
7129 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
7130 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
7131 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
7132 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
7133 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
7134 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
7135 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
7136
7137 case VMX_EXIT_VMCALL:
7138 case VMX_EXIT_VMCLEAR:
7139 case VMX_EXIT_VMLAUNCH:
7140 case VMX_EXIT_VMPTRLD:
7141 case VMX_EXIT_VMPTRST:
7142 case VMX_EXIT_VMREAD:
7143 case VMX_EXIT_VMRESUME:
7144 case VMX_EXIT_VMWRITE:
7145 case VMX_EXIT_VMXOFF:
7146 case VMX_EXIT_VMXON:
7147 case VMX_EXIT_INVEPT:
7148 case VMX_EXIT_INVVPID:
7149 case VMX_EXIT_VMFUNC:
7150 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
7151 break;
7152 default:
7153 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
7154 break;
7155 }
7156 return rc;
7157}
7158#endif
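/* Note: when HMVMX_USE_FUNCTION_TABLE is defined, VMXR0RunGuestCode() dispatches VM-exits directly through
   g_apfnVMExitHandlers[] and the switch-based hmR0VmxHandleExit() above is not compiled in. */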
7159
7160#ifdef DEBUG
7161/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
7162# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
7163 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
7164
7165# define HMVMX_ASSERT_PREEMPT_CPUID() \
7166 do \
7167 { \
7168 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
7169 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
7170 } while (0)
7171
7172# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
7173 do { \
7174 AssertPtr(pVCpu); \
7175 AssertPtr(pMixedCtx); \
7176 AssertPtr(pVmxTransient); \
7177 Assert(pVmxTransient->fVMEntryFailed == false); \
7178 Assert(ASMIntAreEnabled()); \
7179 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
7180 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
7181 Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
7182 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
7183 if (VMMR0IsLogFlushDisabled(pVCpu)) \
7184 HMVMX_ASSERT_PREEMPT_CPUID(); \
7185 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
7186 } while (0)
7187
7188# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
7189 do { \
7190 Log4Func(("\n")); \
7191 } while(0)
7192#else /* Release builds */
7193# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() do { HMVMX_STOP_EXIT_DISPATCH_PROF(); } while(0)
7194# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0)
7195#endif
7196
7197
7198/**
7199 * Advances the guest RIP after reading it from the VMCS.
7200 *
7201 * @returns VBox status code.
7202 * @param pVCpu Pointer to the VMCPU.
7203 * @param pMixedCtx      Pointer to the guest-CPU context. The data may be
7204 * out-of-sync. Make sure to update the required fields
7205 * before using them.
7206 * @param pVmxTransient Pointer to the VMX transient structure.
7207 *
7208 * @remarks No-long-jump zone!!!
7209 */
7210DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7211{
7212 int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7213 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7214 AssertRCReturn(rc, rc);
7215
7216 pMixedCtx->rip += pVmxTransient->cbInstr;
7217 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7218 return rc;
7219}
7220
7221
7222/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7223/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7224/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7225/**
7226 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7227 */
7228HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7229{
7230 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7231 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
7232    /* 32-bit Windows hosts (4 cores) have trouble with this; it causes higher interrupt latency. */
7233#if HC_ARCH_BITS == 64 && defined(VBOX_WITH_VMMR0_DISABLE_PREEMPTION)
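    /* Host interrupts are enabled again by this point, so the external interrupt that caused this VM-exit has
       presumably already been serviced by the host; simply resume guest execution. */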
7234 Assert(ASMIntAreEnabled());
7235 return VINF_SUCCESS;
7236#else
7237 return VINF_EM_RAW_INTERRUPT;
7238#endif
7239}
7240
7241
7242/**
7243 * VM-exit handler for exceptions and NMIs (VMX_EXIT_XCPT_NMI).
7244 */
7245HMVMX_EXIT_DECL hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7246{
7247 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7248 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
7249
7250 int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
7251 AssertRCReturn(rc, rc);
7252
7253 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
7254 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
7255 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
7256
7257 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7258 {
7259 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7260 return VINF_EM_RAW_INTERRUPT;
7261 }
7262
7263 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
7264 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
7265 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
7266 {
7267 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7268 return VINF_SUCCESS;
7269 }
7270 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
7271 {
7272 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7273 return rc;
7274 }
7275
7276 uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
7277 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
7278 switch (uIntrType)
7279 {
7280 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
7281 Assert(uVector == X86_XCPT_DB || uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7282 /* no break */
7283 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
7284 {
7285 switch (uVector)
7286 {
7287 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
7288 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
7289 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
7290 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
7291 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
7292 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
7293#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7294 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
7295 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7296 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
7297 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7298 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
7299 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7300 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
7301 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7302 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
7303 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7304#endif
7305 default:
7306 {
7307 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7308 AssertRCReturn(rc, rc);
7309
7310 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
7311 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
7312 {
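                    /* Presumably all exceptions are intercepted while in real-on-v86 mode (the real-mode guest has
                       no usable IDT of its own in this setup), so queue the exception for re-injection into the guest. */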
7313 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
7314 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
7315 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7316 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
7317 AssertRCReturn(rc, rc);
7318 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
7319 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode,
7320 0 /* GCPtrFaultAddress */);
7321 AssertRCReturn(rc, rc);
7322 }
7323 else
7324 {
7325 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
7326 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
7327 }
7328 break;
7329 }
7330 }
7331 break;
7332 }
7333
7334 case VMX_EXIT_INTERRUPTION_INFO_TYPE_DB_XCPT:
7335 default:
7336 {
7337 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE;
7338 AssertMsgFailed(("Unexpected interruption code %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo)));
7339 break;
7340 }
7341 }
7342 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7343 return rc;
7344}
7345
7346
7347/**
7348 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7349 */
7350HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7351{
7352 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7353
7354    /* The guest is now ready to receive interrupts; indicate that we no longer need interrupt-window exiting. */
7355 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7356 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7357 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7358 AssertRCReturn(rc, rc);
7359
7360 /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
7361 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
7362 return VINF_SUCCESS;
7363}
7364
7365
7366/**
7367 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7368 */
7369HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7370{
7371 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7372 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7373 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7374}
7375
7376
7377/**
7378 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7379 */
7380HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7381{
7382 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7383 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
7384 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7385}
7386
7387
7388/**
7389 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7390 */
7391HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7392{
7393 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7394 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
7395 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7396}
7397
7398
7399/**
7400 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7401 */
7402HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7403{
7404 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7405 PVM pVM = pVCpu->CTX_SUFF(pVM);
7406 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7407 if (RT_LIKELY(rc == VINF_SUCCESS))
7408 {
7409 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7410 Assert(pVmxTransient->cbInstr == 2);
7411 }
7412 else
7413 {
7414 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
7415 rc = VERR_EM_INTERPRETER;
7416 }
7417 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
7418 return rc;
7419}
7420
7421
7422/**
7423 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7424 */
7425HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7426{
7427 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7428 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
7429 AssertRCReturn(rc, rc);
7430
7431 if (pMixedCtx->cr4 & X86_CR4_SMXE)
7432 return VINF_EM_RAW_EMULATE_INSTR;
7433
7434 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
7435 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7436}
7437
7438
7439/**
7440 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7441 */
7442HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7443{
7444 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7445 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7446 AssertRCReturn(rc, rc);
7447
7448 PVM pVM = pVCpu->CTX_SUFF(pVM);
7449 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7450 if (RT_LIKELY(rc == VINF_SUCCESS))
7451 {
7452 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7453 Assert(pVmxTransient->cbInstr == 2);
7454 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7455 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
7456 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7457 }
7458 else
7459 {
7460 AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
7461 rc = VERR_EM_INTERPRETER;
7462 }
7463 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7464 return rc;
7465}
7466
7467
7468/**
7469 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7470 */
7471HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7472{
7473 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7474 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7475 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
7476 AssertRCReturn(rc, rc);
7477
7478 PVM pVM = pVCpu->CTX_SUFF(pVM);
7479 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
7480 if (RT_LIKELY(rc == VINF_SUCCESS))
7481 {
7482 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7483 Assert(pVmxTransient->cbInstr == 3);
7484 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7485 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
7486 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7487 }
7488 else
7489 {
7490 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
7491 rc = VERR_EM_INTERPRETER;
7492 }
7493 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7494 return rc;
7495}
7496
7497
7498/**
7499 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7500 */
7501HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7502{
7503 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7504 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7505 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
7506 AssertRCReturn(rc, rc);
7507
7508 PVM pVM = pVCpu->CTX_SUFF(pVM);
7509 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7510 if (RT_LIKELY(rc == VINF_SUCCESS))
7511 {
7512 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7513 Assert(pVmxTransient->cbInstr == 2);
7514 }
7515 else
7516 {
7517 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7518 rc = VERR_EM_INTERPRETER;
7519 }
7520 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
7521 return rc;
7522}
7523
7524
7525/**
7526 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7527 */
7528HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7529{
7530 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7531 PVM pVM = pVCpu->CTX_SUFF(pVM);
7532 Assert(!pVM->hm.s.fNestedPaging);
7533
7534 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7535 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
7536 AssertRCReturn(rc, rc);
7537
7538 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
7539 rc = VBOXSTRICTRC_VAL(rc2);
7540 if (RT_LIKELY(rc == VINF_SUCCESS))
7541 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7542 else
7543 {
7544 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
7545 pVmxTransient->uExitQualification, rc));
7546 }
7547 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
7548 return rc;
7549}
7550
7551
7552/**
7553 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7554 */
7555HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7556{
7557 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7558 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7559 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7560 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7561 AssertRCReturn(rc, rc);
7562
7563 PVM pVM = pVCpu->CTX_SUFF(pVM);
7564 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7565 if (RT_LIKELY(rc == VINF_SUCCESS))
7566 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7567 else
7568 {
7569 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
7570 rc = VERR_EM_INTERPRETER;
7571 }
7572 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
7573 return rc;
7574}
7575
7576
7577/**
7578 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7579 */
7580HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7581{
7582 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7583 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7584 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7585 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7586 AssertRCReturn(rc, rc);
7587
7588 PVM pVM = pVCpu->CTX_SUFF(pVM);
7589 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7590 rc = VBOXSTRICTRC_VAL(rc2);
7591 if (RT_LIKELY( rc == VINF_SUCCESS
7592 || rc == VINF_EM_HALT))
7593 {
7594 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7595 AssertRCReturn(rc3, rc3);
7596
7597 if ( rc == VINF_EM_HALT
7598 && EMShouldContinueAfterHalt(pVCpu, pMixedCtx))
7599 {
7600 rc = VINF_SUCCESS;
7601 }
7602 }
7603 else
7604 {
7605 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
7606 rc = VERR_EM_INTERPRETER;
7607 }
7608 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
7609 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
7610 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
7611 return rc;
7612}
7613
7614
7615/**
7616 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
7617 */
7618HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7619{
7620 /*
7621 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
7622 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
7623     * executing VMCALL in VMX root operation. If we get here, something funny is going on.
7624 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
7625 */
7626 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7627 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7628}
7629
7630
7631/**
7632 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
7633 */
7634HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7635{
7636 /*
7637 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
7638     * root operation. If we get here, something funny is going on.
7639 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
7640 */
7641 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7642 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7643}
7644
7645
7646/**
7647 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
7648 */
7649HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7650{
7651 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
7652 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7653 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7654}
7655
7656
7657/**
7658 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
7659 */
7660HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7661{
7662 /*
7663 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
7664 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
7665 * See Intel spec. 25.3 "Other Causes of VM-exits".
7666 */
7667 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7668 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7669}
7670
7671
7672/**
7673 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
7674 * VM-exit.
7675 */
7676HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7677{
7678 /*
7679 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM. See Intel spec. "33.14.1 Default Treatment of
7680 * SMI Delivery" and "29.3 VMX Instructions" for "VMXON". It is -NOT- blocked in VMX non-root operation so we can potentially
7681 * still get these exits. See Intel spec. "23.8 Restrictions on VMX operation".
7682 */
7683 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7684    return VINF_SUCCESS; /** @todo r=ramshankar: correct? */
7685}
7686
7687
7688/**
7689 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7690 * VM-exit.
7691 */
7692HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7693{
7694 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7695 return VINF_EM_RESET;
7696}
7697
7698
7699/**
7700 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7701 */
7702HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7703{
7704 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7705 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
7706 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7707 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7708 AssertRCReturn(rc, rc);
7709
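    /* HLT is a single-byte opcode, so advancing RIP by one skips past it without reading the VMCS instruction length. */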
7710 pMixedCtx->rip++;
7711 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7712 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
7713 rc = VINF_SUCCESS;
7714 else
7715 rc = VINF_EM_HALT;
7716
7717 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
7718 return rc;
7719}
7720
7721
7722/**
7723 * VM-exit handler for instructions that result in a #UD exception delivered to the guest.
7724 */
7725HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7726{
7727 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7728 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
7729 return VINF_SUCCESS;
7730}
7731
7732
7733/**
7734 * VM-exit handler for expiry of the VMX preemption timer.
7735 */
7736HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7737{
7738 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7739
7740 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
7741 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7742
7743 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7744 PVM pVM = pVCpu->CTX_SUFF(pVM);
7745 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7746 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
7747 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7748}
7749
7750
7751/**
7752 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7753 */
7754HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7755{
7756 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7757
7758    /* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
7759 /** @todo check if XSETBV is supported by the recompiler. */
7760 return VERR_EM_INTERPRETER;
7761}
7762
7763
7764/**
7765 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7766 */
7767HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7768{
7769 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7770
7771    /* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
7772 /** @todo implement EMInterpretInvpcid() */
7773 return VERR_EM_INTERPRETER;
7774}
7775
7776
7777/**
7778 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
7779 * Error VM-exit.
7780 */
7781HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7782{
7783 uint32_t uIntrState;
7784 HMVMXHCUINTREG uHCReg;
7785 uint64_t u64Val;
7786 uint32_t u32Val;
7787
7788 int rc = hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
7789 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
7790 rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7791 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
7792 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7793 AssertRCReturn(rc, rc);
7794
7795 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
7796 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7797 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7798 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
7799
7800 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
7801 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
7802 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
7803 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
7804 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
7805 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7806 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
7807 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
7808 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
7809 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7810 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7811 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7812
7813 PVM pVM = pVCpu->CTX_SUFF(pVM);
7814 HMDumpRegs(pVM, pVCpu, pMixedCtx);
7815
7816 return VERR_VMX_INVALID_GUEST_STATE;
7817}
7818
7819
7820/**
7821 * VM-exit handler for VM-entry failure due to an MSR-load
7822 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
7823 */
7824HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7825{
7826 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7827 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7828}
7829
7830
7831/**
7832 * VM-exit handler for VM-entry failure due to a machine-check event
7833 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
7834 */
7835HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7836{
7837 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7838 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7839}
7840
7841
7842/**
7843 * VM-exit handler for all undefined reasons. Should never ever happen... in
7844 * theory.
7845 */
7846HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7847{
7848 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
7849 return VERR_VMX_UNDEFINED_EXIT_CODE;
7850}
7851
7852
7853/**
7854 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
7855 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
7856 * Conditional VM-exit.
7857 */
7858HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7859{
7860 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7861
7862 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
7863 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
7864 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
7865 return VERR_EM_INTERPRETER;
7866 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7867 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7868}
7869
7870
7871/**
7872 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
7873 */
7874HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7875{
7876 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7877
7878 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
7879 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
7880 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
7881 return VERR_EM_INTERPRETER;
7882 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7883 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7884}
7885
7886
7887/**
7888 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7889 */
7890HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7891{
7892 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7893
7894 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
7895 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7896 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7897 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7898 AssertRCReturn(rc, rc);
7899
7900 PVM pVM = pVCpu->CTX_SUFF(pVM);
7901 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7902 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
7903 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
7904 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
7905
7906 if (RT_LIKELY(rc == VINF_SUCCESS))
7907 {
7908 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7909 Assert(pVmxTransient->cbInstr == 2);
7910 }
7911 return rc;
7912}
7913
7914
7915/**
7916 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7917 */
7918HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7919{
7920 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7921 PVM pVM = pVCpu->CTX_SUFF(pVM);
7922 int rc = VINF_SUCCESS;
7923
7924    /* If TPR patching is active, LSTAR holds the guest TPR; writes to it must be propagated to the APIC. */
7925 if ( pVM->hm.s.fTPRPatchingActive
7926 && pMixedCtx->ecx == MSR_K8_LSTAR)
7927 {
7928 Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* Requires EFER but it's always up-to-date. */
7929 if ((pMixedCtx->eax & 0xff) != pVmxTransient->u8GuestTpr)
7930 {
7931 rc = PDMApicSetTPR(pVCpu, pMixedCtx->eax & 0xff);
7932 AssertRC(rc);
7933 }
7934
7935 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7936 Assert(pVmxTransient->cbInstr == 2);
7937 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7938 return VINF_SUCCESS;
7939 }
7940
7941 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
7942 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7943 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7944 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7945 AssertRCReturn(rc, rc);
7946 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
7947
7948 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7949 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
7950 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7951
7952 if (RT_LIKELY(rc == VINF_SUCCESS))
7953 {
7954 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7955
7956 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7957 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
7958 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
7959 {
7960 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_APIC_STATE);
7961 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7962 }
7963 else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
7964 {
7965 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
7966 AssertRCReturn(rc, rc);
7967 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
7968 }
7969 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7970 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7971
7972 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
7973 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
7974 {
7975 switch (pMixedCtx->ecx)
7976 {
7977 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;
7978 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
7979 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
7980 case MSR_K8_FS_BASE: /* no break */
7981 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; break;
7982 case MSR_K8_KERNEL_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS; break;
7983 }
7984 }
7985#ifdef VBOX_STRICT
7986 else
7987 {
7988 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7989 switch (pMixedCtx->ecx)
7990 {
7991 case MSR_IA32_SYSENTER_CS:
7992 case MSR_IA32_SYSENTER_EIP:
7993 case MSR_IA32_SYSENTER_ESP:
7994 case MSR_K8_FS_BASE:
7995 case MSR_K8_GS_BASE:
7996 {
7997 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
7998 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7999 }
8000
8001 case MSR_K8_LSTAR:
8002 case MSR_K6_STAR:
8003 case MSR_K8_SF_MASK:
8004 case MSR_K8_TSC_AUX:
8005 case MSR_K8_KERNEL_GS_BASE:
8006 {
8007 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8008 pMixedCtx->ecx));
8009 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8010 }
8011 }
8012 }
8013#endif /* VBOX_STRICT */
8014 }
8015 return rc;
8016}
8017
8018
8019/**
8020 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8021 */
8022HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8023{
8024 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8025
8026 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
8027 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
8028 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
8029 return VERR_EM_INTERPRETER;
8030 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8031 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8032}
8033
8034
8035/**
8036 * VM-exit handler for when the TPR value is lowered below the specified
8037 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8038 */
8039HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8040{
8041 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8042 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
8043
8044 /*
8045     * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
8046 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and
8047 * resume guest execution.
8048 */
8049 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
8050 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
8051 return VINF_SUCCESS;
8052}
8053
8054
8055/**
8056 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8057 * VM-exit.
8058 *
8059 * @retval VINF_SUCCESS when guest execution can continue.
8060 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
8061 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8062 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
8063 * recompiler.
8064 */
8065HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8066{
8067 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8068 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
8069 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8070 AssertRCReturn(rc, rc);
8071
8072 const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
8073 const uint32_t uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
8074 PVM pVM = pVCpu->CTX_SUFF(pVM);
8075 switch (uAccessType)
8076 {
8077 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
8078 {
8079#if 0
8080 /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
8081 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8082#else
8083 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8084 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8085 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8086#endif
8087 AssertRCReturn(rc, rc);
8088
8089 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8090 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
8091 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
8092 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
8093
8094 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
8095 {
8096 case 0: /* CR0 */
8097 Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
8098 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8099 break;
8100            case 2: /* CR2 */
8101                /* Nothing to do here; CR2 is not part of the VMCS. */
8102 break;
8103 case 3: /* CR3 */
8104 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
8105 Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
8106 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
8107 break;
8108 case 4: /* CR4 */
8109 Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
8110 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
8111 break;
8112 case 8: /* CR8 */
8113 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
8114 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
8115 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
8116 break;
8117 default:
8118 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
8119 break;
8120 }
8121
8122 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
8123 break;
8124 }
8125
8126 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
8127 {
8128 /* EMInterpretCRxRead() requires EFER MSR, CS. */
8129 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8130 AssertRCReturn(rc, rc);
8131 Assert( !pVM->hm.s.fNestedPaging
8132 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
8133 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
8134
8135 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8136 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
8137 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
8138
8139 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8140 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
8141 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
8142 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8143 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
8144 Log4(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
8145 break;
8146 }
8147
8148 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
8149 {
8150 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8151 AssertRCReturn(rc, rc);
8152 rc = EMInterpretCLTS(pVM, pVCpu);
8153 AssertRCReturn(rc, rc);
8154 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8155 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
8156 Log4(("CRX CLTS write rc=%d\n", rc));
8157 break;
8158 }
8159
8160 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
8161 {
8162 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8163 AssertRCReturn(rc, rc);
8164 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
8165 if (RT_LIKELY(rc == VINF_SUCCESS))
8166 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8167 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
8168 Log4(("CRX LMSW write rc=%d\n", rc));
8169 break;
8170 }
8171
8172 default:
8173 {
8174 AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
8175 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
8176 }
8177 }
8178
8179 /* Validate possible error codes. */
8180 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
8181 || rc == VERR_VMX_UNEXPECTED_EXCEPTION);
8182 if (RT_SUCCESS(rc))
8183 {
8184 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8185 AssertRCReturn(rc2, rc2);
8186 }
8187
8188 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
8189 return rc;
8190}
8191
8192
8193/**
8194 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8195 * VM-exit.
8196 */
8197HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8198{
8199 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8200 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
8201
8202 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8203 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8204 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
8205 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
8206 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
8207 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
8208 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8209 AssertRCReturn(rc, rc);
8210
8211 Log4(("CS:RIP=%04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
8212
8213 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8214 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
8215 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
8216 bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
8217 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
8218 bool fIOString = (VMX_EXIT_QUALIFICATION_IO_STRING(pVmxTransient->uExitQualification) == 1);
8219 Assert(uIOWidth == 0 || uIOWidth == 1 || uIOWidth == 3);
8220
8221 /* I/O operation lookup arrays. */
8222 static const uint32_t s_aIOSize[4] = { 1, 2, 0, 4 }; /* Size of the I/O Accesses. */
8223 static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
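    /* Both tables are indexed by the exit-qualification access-size field: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes;
       2 is not a valid encoding, hence the zero entries (see the Assert on uIOWidth above). */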
8224
8225 const uint32_t cbSize = s_aIOSize[uIOWidth];
8226 const uint32_t cbInstr = pVmxTransient->cbInstr;
8227 PVM pVM = pVCpu->CTX_SUFF(pVM);
8228 if (fIOString)
8229 {
8230 /* INS/OUTS - I/O String instruction. */
8231 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
8232        /** @todo For now we disassemble manually; later, optimize by getting the fields from
8233 * the VMCS. VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR contains the flat pointer
8234 * operand of the instruction. VMX_VMCS32_RO_EXIT_INSTR_INFO contains
8235 * segment prefix info. */
8236 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
8237 if (RT_SUCCESS(rc))
8238 {
8239 if (fIOWrite)
8240 {
8241 VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
8242 (DISCPUMODE)pDis->uAddrMode, cbSize);
8243 rc = VBOXSTRICTRC_VAL(rc2);
8244 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
8245 }
8246 else
8247 {
8248 VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
8249 (DISCPUMODE)pDis->uAddrMode, cbSize);
8250 rc = VBOXSTRICTRC_VAL(rc2);
8251 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
8252 }
8253 }
8254 else
8255 {
8256 AssertMsg(rc == VERR_EM_INTERPRETER, ("rc=%Rrc RIP %#RX64\n", rc, pMixedCtx->rip));
8257 rc = VINF_EM_RAW_EMULATE_INSTR;
8258 }
8259 }
8260 else
8261 {
8262 /* IN/OUT - I/O instruction. */
8263 const uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
8264 Assert(!VMX_EXIT_QUALIFICATION_IO_REP(pVmxTransient->uExitQualification));
8265 if (fIOWrite)
8266 {
8267 VBOXSTRICTRC rc2 = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbSize);
8268 rc = VBOXSTRICTRC_VAL(rc2);
8269 if (rc == VINF_IOM_R3_IOPORT_WRITE)
8270 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
8271 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
8272 }
8273 else
8274 {
8275 uint32_t u32Result = 0;
8276 VBOXSTRICTRC rc2 = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbSize);
8277 rc = VBOXSTRICTRC_VAL(rc2);
8278 if (IOM_SUCCESS(rc))
8279 {
8280 /* Save result of I/O IN instr. in AL/AX/EAX. */
8281 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8282 }
8283 else if (rc == VINF_IOM_R3_IOPORT_READ)
8284 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
8285 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
8286 }
8287 }
8288
8289 if (IOM_SUCCESS(rc))
8290 {
8291 pMixedCtx->rip += cbInstr;
8292 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8293 if (RT_LIKELY(rc == VINF_SUCCESS))
8294 {
8295 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx); /* For DR7. */
8296 AssertRCReturn(rc, rc);
8297
8298 /* If any IO breakpoints are armed, then we should check if a debug trap needs to be generated. */
8299 if (pMixedCtx->dr[7] & X86_DR7_ENABLED_MASK)
8300 {
8301 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
8302 for (unsigned i = 0; i < 4; i++)
8303 {
8304 uint32_t uBPLen = s_aIOSize[X86_DR7_GET_LEN(pMixedCtx->dr[7], i)];
8305 if ( ( uIOPort >= pMixedCtx->dr[i]
8306 && uIOPort < pMixedCtx->dr[i] + uBPLen)
8307 && (pMixedCtx->dr[7] & (X86_DR7_L(i) | X86_DR7_G(i)))
8308 && (pMixedCtx->dr[7] & X86_DR7_RW(i, X86_DR7_RW_IO)) == X86_DR7_RW(i, X86_DR7_RW_IO))
8309 {
8310 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8311 uint64_t uDR6 = ASMGetDR6();
8312
8313 /* Clear all breakpoint status flags and set the one we just hit. */
8314 uDR6 &= ~(X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3);
8315 uDR6 |= (uint64_t)RT_BIT(i);
8316
8317 /*
8318 * Note: AMD64 Architecture Programmer's Manual 13.1:
8319                         * Bits 15:13 of the DR6 register are never cleared by the processor and must
8320 * be cleared by software after the contents have been read.
8321 */
8322 ASMSetDR6(uDR6);
8323
8324 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
8325 pMixedCtx->dr[7] &= ~X86_DR7_GD;
8326
8327 /* Paranoia. */
8328 pMixedCtx->dr[7] &= 0xffffffff; /* Upper 32 bits MBZ. */
8329 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
8330 pMixedCtx->dr[7] |= 0x400; /* MB1. */
8331
8332 /* Resync DR7 */
8333 /** @todo probably cheaper to just reload DR7, nothing else needs changing. */
8334 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
8335
8336 /* Set #DB to be injected into the VM and continue guest execution. */
8337 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
8338 break;
8339 }
8340 }
8341 }
8342 }
8343 }
8344
8345#ifdef DEBUG
8346 if (rc == VINF_IOM_R3_IOPORT_READ)
8347 Assert(!fIOWrite);
8348 else if (rc == VINF_IOM_R3_IOPORT_WRITE)
8349 Assert(fIOWrite);
8350 else
8351 {
8352 AssertMsg( RT_FAILURE(rc)
8353 || rc == VINF_SUCCESS
8354 || rc == VINF_EM_RAW_EMULATE_INSTR
8355 || rc == VINF_EM_RAW_GUEST_TRAP
8356 || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", rc));
8357 }
8358#endif
8359
8360 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
8361 return rc;
8362}
8363
8364
8365/**
8366 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8367 * VM-exit.
8368 */
8369HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8370{
8371 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8372
8373    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8374 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8375 AssertRCReturn(rc, rc);
8376 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
8377 {
8378 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
8379 AssertRCReturn(rc, rc);
8380 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
8381 {
8382 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
8383
8384 /* Software interrupts and exceptions will be regenerated when the recompiler restarts the instruction. */
8385 if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
8386 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
8387 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
8388 {
8389 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
8390 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
8391
8392 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
8393 Assert(!pVCpu->hm.s.Event.fPending);
8394 pVCpu->hm.s.Event.fPending = true;
8395 pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
8396 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
8397 AssertRCReturn(rc, rc);
8398 if (fErrorCodeValid)
8399 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
8400 else
8401 pVCpu->hm.s.Event.u32ErrCode = 0;
8402 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
8403 && uVector == X86_XCPT_PF)
8404 {
8405 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
8406 }
8407
8408 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
8409 }
8410 }
8411 }
8412
8413 /** @todo Emulate task switch someday, currently just going back to ring-3 for
8414 * emulation. */
8415 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
8416 return VERR_EM_INTERPRETER;
8417}
8418
8419
8420/**
8421 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8422 */
8423HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8424{
8425 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8426 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
8427 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
8428 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8429 AssertRCReturn(rc, rc);
8430 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
8431 return VINF_EM_DBG_STOP;
8432}
8433
8434
8435/**
8436 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8437 */
8438HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8439{
8440 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8441
8442 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8443 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8444 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8445 return VINF_SUCCESS;
8446 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8447 return rc;
8448
8449#if 0
8450 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
8451 * just sync the whole thing. */
8452 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8453#else
8454 /* Aggressive state sync. for now. */
8455 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8456 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8457 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8458#endif
8459 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8460 AssertRCReturn(rc, rc);
8461
8462 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
8463 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
8464 switch (uAccessType)
8465 {
8466 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8467 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8468 {
8469 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8470 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
8471 {
8472 AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8473 }
8474
8475 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
8476 GCPhys &= PAGE_BASE_GC_MASK;
8477 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
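     /* Example: with the APIC mapped at its default base 0xFEE00000, an access at offset 0x80 (the TPR)
        resolves to GCPhys 0xFEE00080. */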
8478 PVM pVM = pVCpu->CTX_SUFF(pVM);
8479 Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8480 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
8481
8482 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
8483 (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
8484 CPUMCTX2CORE(pMixedCtx), GCPhys);
8485 rc = VBOXSTRICTRC_VAL(rc2);
8486 Log4(("ApicAccess rc=%d\n", rc));
8487 if ( rc == VINF_SUCCESS
8488 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8489 || rc == VERR_PAGE_NOT_PRESENT)
8490 {
8491 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8492 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8493 rc = VINF_SUCCESS;
8494 }
8495 break;
8496 }
8497
8498 default:
8499 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
8500 rc = VINF_EM_RAW_EMULATE_INSTR;
8501 break;
8502 }
8503
8504 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
8505 return rc;
8506}
8507
8508
8509/**
8510 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8511 * VM-exit.
8512 */
8513HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8514{
8515 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8516
8517 /* We should -not- get this VM-exit if the guest is debugging. */
8518 if (CPUMIsGuestDebugStateActive(pVCpu))
8519 {
8520 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8521 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8522 }
8523
8524 int rc = VERR_INTERNAL_ERROR_5;
8525 if ( !DBGFIsStepping(pVCpu)
8526 && !CPUMIsHyperDebugStateActive(pVCpu))
8527 {
8528 /* Don't intercept MOV DRx. */
8529 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
8530 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8531 AssertRCReturn(rc, rc);
8532
8533 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8534 PVM pVM = pVCpu->CTX_SUFF(pVM);
8535 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
8536 AssertRC(rc);
8537 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8538
8539#ifdef VBOX_WITH_STATISTICS
8540 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8541 AssertRCReturn(rc, rc);
8542 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8543 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8544 else
8545 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8546#endif
8547 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
8548 return VINF_SUCCESS;
8549 }
8550
8551 /*
8552 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
8553 * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update only the segment registers from the CPU.
8554 */
8555 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8556 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8557 AssertRCReturn(rc, rc);
8558
8559 PVM pVM = pVCpu->CTX_SUFF(pVM);
8560 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8561 {
8562 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8563 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
8564 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
8565 if (RT_SUCCESS(rc))
8566 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
8567 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8568 }
8569 else
8570 {
8571 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8572 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
8573 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
8574 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8575 }
8576
8577 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8578 if (RT_SUCCESS(rc))
8579 {
8580 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8581 AssertRCReturn(rc2, rc2);
8582 }
8583 return rc;
8584}
8585
8586
8587/**
8588 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8589 * Conditional VM-exit.
8590 */
8591HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8592{
8593 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8594 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
8595
8596 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8597 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8598 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8599 return VINF_SUCCESS;
8600 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8601 return rc;
8602
8603 RTGCPHYS GCPhys = 0;
8604 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8605
8606#if 0
8607 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
8608#else
8609 /* Aggressive state sync. for now. */
8610 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8611 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8612 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8613#endif
8614 AssertRCReturn(rc, rc);
8615
8616 /*
8617 * If we succeed, resume guest execution.
8618 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8619 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8620 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8621 * weird case. See @bugref{6043}.
8622 */
8623 PVM pVM = pVCpu->CTX_SUFF(pVM);
8624 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
8625 rc = VBOXSTRICTRC_VAL(rc2);
8626 Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
8627 if ( rc == VINF_SUCCESS
8628 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8629 || rc == VERR_PAGE_NOT_PRESENT)
8630 {
8631 /* Successfully handled MMIO operation. */
8632 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8633 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8634 return VINF_SUCCESS;
8635 }
8636 return rc;
8637}
8638
8639
8640/**
8641 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8642 * VM-exit.
8643 */
8644HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8645{
8646 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8647 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
8648
8649 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8650 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8651 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8652 return VINF_SUCCESS;
8653 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8654 return rc;
8655
8656 RTGCPHYS GCPhys = 0;
8657 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8658 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8659#if 0
8660 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
8661#else
8662 /* Aggressive state sync. for now. */
8663 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8664 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8665 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8666#endif
8667 AssertRCReturn(rc, rc);
8668
8669 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
8670 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
8671
8672 RTGCUINT uErrorCode = 0;
8673 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
8674 uErrorCode |= X86_TRAP_PF_ID;
8675 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
8676 uErrorCode |= X86_TRAP_PF_RW;
8677 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
8678 uErrorCode |= X86_TRAP_PF_P;
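     /* The synthesized error code mimics a #PF error code (instruction fetch, write, present) so the
        nested-paging handler below can treat the EPT violation like an ordinary page fault on GCPhys. */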
8679
8680 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8681
8682 Log4(("EPT violation %#RX64 at %#RX64 ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
8683 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8684
8685 /* Handle the pagefault trap for the nested shadow table. */
8686 PVM pVM = pVCpu->CTX_SUFF(pVM);
8687 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
8688 TRPMResetTrap(pVCpu);
8689
8690 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8691 if ( rc == VINF_SUCCESS
8692 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8693 || rc == VERR_PAGE_NOT_PRESENT)
8694 {
8695 /* Successfully synced our nested page tables. */
8696 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
8697 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
8698 return VINF_SUCCESS;
8699 }
8700
8701 Log4(("EPT return to ring-3 rc=%d\n", rc));
8702 return rc;
8703}
8704
8705
8706/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8707/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
8708/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8709/**
8710 * VM-exit exception handler for #MF (Math Fault: floating point exception).
8711 */
8712static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8713{
8714 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8715 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
8716
8717 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8718 AssertRCReturn(rc, rc);
8719
8720 if (!(pMixedCtx->cr0 & X86_CR0_NE))
8721 {
8722 /* Old-style FPU error reporting needs some extra work. */
8723 /** @todo don't fall back to the recompiler, but do it manually. */
8724 return VERR_EM_INTERPRETER;
8725 }
8726 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8727 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8728 return rc;
8729}
8730
8731
8732/**
8733 * VM-exit exception handler for #BP (Breakpoint exception).
8734 */
8735static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8736{
8737 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8738 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
8739
8740 /** @todo Try to optimize this by not saving the entire guest state unless
8741 * really needed. */
8742 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8743 AssertRCReturn(rc, rc);
8744
8745 PVM pVM = pVCpu->CTX_SUFF(pVM);
8746 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8747 if (rc == VINF_EM_RAW_GUEST_TRAP)
8748 {
8749 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8750 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8751 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8752 AssertRCReturn(rc, rc);
8753
8754 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8755 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8756 }
8757
8758 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
8759 return rc;
8760}
8761
8762
8763/**
8764 * VM-exit exception handler for #DB (Debug exception).
8765 */
8766static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8767{
8768 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8769 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
8770
8771 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8772 AssertRCReturn(rc, rc);
8773
8774 /* See Intel spec. Table 27-1 "Exit Qualifications for debug exceptions" for the format. */
8775 uint64_t uDR6 = X86_DR6_INIT_VAL;
8776 uDR6 |= (pVmxTransient->uExitQualification
8777 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
8778 PVM pVM = pVCpu->CTX_SUFF(pVM);
8779 rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
8780 if (rc == VINF_EM_RAW_GUEST_TRAP)
8781 {
8782 /* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet. See Intel spec. 27.1 "Architectural State before a VM-Exit". */
8783 pMixedCtx->dr[6] = uDR6;
8784
8785 if (CPUMIsGuestDebugStateActive(pVCpu))
8786 ASMSetDR6(pMixedCtx->dr[6]);
8787
8788 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
8789
8790 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
8791 pMixedCtx->dr[7] &= ~X86_DR7_GD;
8792
8793 /* Paranoia. */
8794 pMixedCtx->dr[7] &= 0xffffffff; /* Upper 32 bits MBZ. */
8795 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
8796 pMixedCtx->dr[7] |= 0x400; /* MB1. */
8797
8798 rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
8799 AssertRCReturn(rc,rc);
8800
8801 int rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8802 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8803 rc2 |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8804 AssertRCReturn(rc2, rc2);
8805 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8806 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8807 rc = VINF_SUCCESS;
8808 }
8809
8810 return rc;
8811}
8812
8813
8814/**
8815 * VM-exit exception handler for #NM (Device-not-available exception: floating
8816 * point exception).
8817 */
8818static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8819{
8820 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8821
8822#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8823 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
8824#endif
8825
8826 /* We require CR0 and EFER. EFER is always up-to-date. */
8827 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8828 AssertRCReturn(rc, rc);
8829
8830 /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
8831 PVM pVM = pVCpu->CTX_SUFF(pVM);
8832 rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8833 if (rc == VINF_SUCCESS)
8834 {
8835 Assert(CPUMIsGuestFPUStateActive(pVCpu));
8836 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8837 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
8838 return VINF_SUCCESS;
8839 }
8840
8841 /* Forward #NM to the guest. */
8842 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
8843 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8844 AssertRCReturn(rc, rc);
8845 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8846 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
8847 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
8848 return rc;
8849}
8850
8851
8852/**
8853 * VM-exit exception handler for #GP (General-protection exception).
8854 *
8855 * @remarks Requires pVmxTransient->uExitIntrInfo to be up-to-date.
8856 */
8857static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8858{
8859 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8860 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
8861
8862 int rc = VERR_INTERNAL_ERROR_5;
8863 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
8864 {
8865#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8866 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
8867 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8868 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8869 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8870 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8871 AssertRCReturn(rc, rc);
8872 Log4(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
8873 pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
8874 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8875 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8876 return rc;
8877#else
8878 /* We don't intercept #GP. */
8879 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
8880 return VERR_VMX_UNEXPECTED_EXCEPTION;
8881#endif
8882 }
8883
8884 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
8885 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
8886
8887 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
8888 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8889 AssertRCReturn(rc, rc);
8890
8891 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
8892 uint32_t cbOp = 0;
8893 PVM pVM = pVCpu->CTX_SUFF(pVM);
8894 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
8895 if (RT_SUCCESS(rc))
8896 {
8897 rc = VINF_SUCCESS;
8898 Assert(cbOp == pDis->cbInstr);
8899 Log4(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8900 switch (pDis->pCurInstr->uOpcode)
8901 {
8902 case OP_CLI:
8903 {
8904 pMixedCtx->eflags.Bits.u1IF = 0;
8905 pMixedCtx->rip += pDis->cbInstr;
8906 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8907 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
8908 break;
8909 }
8910
8911 case OP_STI:
8912 {
8913 pMixedCtx->eflags.Bits.u1IF = 1;
8914 pMixedCtx->rip += pDis->cbInstr;
8915 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
8916 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
8917 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8918 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
8919 break;
8920 }
8921
8922 case OP_HLT:
8923 {
8924 rc = VINF_EM_HALT;
8925 pMixedCtx->rip += pDis->cbInstr;
8926 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8927 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
8928 break;
8929 }
8930
8931 case OP_POPF:
8932 {
8933 Log4(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
8934 uint32_t cbParm = 0;
8935 uint32_t uMask = 0;
8936 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8937 {
8938 cbParm = 4;
8939 uMask = 0xffffffff;
8940 }
8941 else
8942 {
8943 cbParm = 2;
8944 uMask = 0xffff;
8945 }
8946
8947 /* Get the stack pointer & pop the contents of the stack onto EFlags. */
8948 RTGCPTR GCPtrStack = 0;
8949 X86EFLAGS uEflags;
8950 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
8951 &GCPtrStack);
8952 if (RT_SUCCESS(rc))
8953 {
8954 Assert(sizeof(uEflags.u32) >= cbParm);
8955 uEflags.u32 = 0;
8956 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u32, cbParm);
8957 }
8958 if (RT_FAILURE(rc))
8959 {
8960 rc = VERR_EM_INTERPRETER;
8961 break;
8962 }
8963 Log4(("POPF %x -> %#RX64 mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
8964 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
8965 | (uEflags.u32 & X86_EFL_POPF_BITS & uMask);
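     /* Only flags that POPF may modify (X86_EFL_POPF_BITS) and that fall within the operand size (uMask) are
        taken from the stack image; all other flags retain their current values. */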
8966 /* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */
8967 pMixedCtx->eflags.Bits.u1RF = 0;
8968 pMixedCtx->esp += cbParm;
8969 pMixedCtx->esp &= uMask;
8970 pMixedCtx->rip += pDis->cbInstr;
8971 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
8972 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
8973 break;
8974 }
8975
8976 case OP_PUSHF:
8977 {
8978 uint32_t cbParm = 0;
8979 uint32_t uMask = 0;
8980 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8981 {
8982 cbParm = 4;
8983 uMask = 0xffffffff;
8984 }
8985 else
8986 {
8987 cbParm = 2;
8988 uMask = 0xffff;
8989 }
8990
8991 /* Get the stack pointer & push the contents of eflags onto the stack. */
8992 RTGCPTR GCPtrStack = 0;
8993 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
8994 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
8995 if (RT_FAILURE(rc))
8996 {
8997 rc = VERR_EM_INTERPRETER;
8998 break;
8999 }
9000 X86EFLAGS uEflags;
9001 uEflags = pMixedCtx->eflags;
9002 /* The RF & VM bits are cleared in the image stored on the stack; see the Intel instruction reference for PUSHF. */
9003 uEflags.Bits.u1RF = 0;
9004 uEflags.Bits.u1VM = 0;
9005
9006 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u, cbParm);
9007 if (RT_FAILURE(rc))
9008 {
9009 rc = VERR_EM_INTERPRETER;
9010 break;
9011 }
9012 Log4(("PUSHF %x -> %#RGv\n", uEflags.u, GCPtrStack));
9013 pMixedCtx->esp -= cbParm;
9014 pMixedCtx->esp &= uMask;
9015 pMixedCtx->rip += pDis->cbInstr;
9016 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
9017 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
9018 break;
9019 }
9020
9021 case OP_IRET:
9022 {
9023 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
9024 * instruction reference. */
9025 RTGCPTR GCPtrStack = 0;
9026 uint32_t uMask = 0xffff;
9027 uint16_t aIretFrame[3];
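     /* Real-mode IRET frame popped off the stack: [0]=IP, [1]=CS, [2]=FLAGS, 16 bits each. */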
9028 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
9029 {
9030 rc = VERR_EM_INTERPRETER;
9031 break;
9032 }
9033 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
9034 &GCPtrStack);
9035 if (RT_SUCCESS(rc))
9036 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
9037 if (RT_FAILURE(rc))
9038 {
9039 rc = VERR_EM_INTERPRETER;
9040 break;
9041 }
9042 pMixedCtx->eip = 0;
9043 pMixedCtx->ip = aIretFrame[0];
9044 pMixedCtx->cs.Sel = aIretFrame[1];
9045 pMixedCtx->cs.ValidSel = aIretFrame[1];
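     /* In real mode the segment base is simply the selector shifted left by 4. */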
9046 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
9047 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
9048 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
9049 pMixedCtx->sp += sizeof(aIretFrame);
9050 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
9051 | HM_CHANGED_GUEST_RFLAGS;
9052 Log4(("IRET %#RGv to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
9053 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
9054 break;
9055 }
9056
9057 case OP_INT:
9058 {
9059 uint16_t uVector = pDis->Param1.uValue & 0xff;
9060 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
9061 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
9062 break;
9063 }
9064
9065 case OP_INTO:
9066 {
9067 if (pMixedCtx->eflags.Bits.u1OF)
9068 {
9069 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
9070 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
9071 }
9072 break;
9073 }
9074
9075 default:
9076 {
9077 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
9078 EMCODETYPE_SUPERVISOR);
9079 rc = VBOXSTRICTRC_VAL(rc2);
9080 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
9081 Log4(("#GP rc=%Rrc\n", rc));
9082 break;
9083 }
9084 }
9085 }
9086 else
9087 rc = VERR_EM_INTERPRETER;
9088
9089 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
9090 ("#GP Unexpected rc=%Rrc\n", rc));
9091 return rc;
9092}
9093
9094
9095/**
9096 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
9097 * the exception reported in the VMX transient structure back into the VM.
9098 *
9099 * @remarks Requires uExitIntrInfo in the VMX transient structure to be
9100 * up-to-date.
9101 */
9102static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9103{
9104 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
9105
9106 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
9107 hmR0VmxCheckExitDueToEventDelivery(). */
9108 int rc = hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
9109 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9110 AssertRCReturn(rc, rc);
9111 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
9112 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9113 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
9114 return VINF_SUCCESS;
9115}
9116
9117
9118/**
9119 * VM-exit exception handler for #PF (Page-fault exception).
9120 */
9121static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9122{
9123 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
9124 PVM pVM = pVCpu->CTX_SUFF(pVM);
9125 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9126 rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
9127 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
9128 AssertRCReturn(rc, rc);
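     /* For page-fault VM-exits the exit qualification holds the faulting linear address (the guest's CR2 value). */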
9129
9130#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
9131 if (pVM->hm.s.fNestedPaging)
9132 {
9133 if (RT_LIKELY(!pVmxTransient->fVectoringPF))
9134 {
9135 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
9136 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
9137 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9138 0 /* cbInstr */, pVmxTransient->uExitIntrErrorCode, pVmxTransient->uExitQualification);
9139 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
9140 }
9141 else
9142 {
9143 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
9144 pVCpu->hm.s.Event.fPending = false; /* A vectoring #PF. */
9145 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
9146 Log4(("Pending #DF due to vectoring #PF. NP\n"));
9147 }
9148 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
9149 return rc;
9150 }
9151#else
9152 Assert(!pVM->hm.s.fNestedPaging);
9153#endif
9154
9155#ifdef VBOX_HM_WITH_GUEST_PATCHING
9156 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
9157 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9158 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9159 AssertRCReturn(rc, rc);
9160 /* Shortcut for APIC TPR access, only for 32-bit guests. */
9161 if ( pVM->hm.s.fTRPPatchingAllowed
9162 && pVM->hm.s.pGuestPatchMem
9163 && (pVmxTransient->uExitQualification & 0xfff) == 0x80 /* TPR offset */
9164 && !(pVmxTransient->uExitIntrErrorCode & X86_TRAP_PF_P) /* Page not present */
9165 && CPUMGetGuestCPL(pVCpu) == 0 /* Requires CR0, EFLAGS, segments. */
9166 && !CPUMIsGuestInLongModeEx(pMixedCtx) /* Requires EFER. */
9167 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
9168 {
9169 RTGCPHYS GCPhys;
9170 RTGCPHYS GCPhysApicBase = (pMixedCtx->msrApicBase & PAGE_BASE_GC_MASK);
9171 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pVmxTransient->uExitQualification, NULL /* pfFlags */, &GCPhys);
9172 if ( rc == VINF_SUCCESS
9173 && GCPhys == GCPhysApicBase)
9174 {
9175 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9176 AssertRCReturn(rc, rc);
9177
9178 /* Only attempt to patch the instruction once. */
9179 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pMixedCtx->eip);
9180 if (!pPatch)
9181 return VINF_EM_HM_PATCH_TPR_INSTR;
9182 }
9183 }
9184#endif
9185
9186 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9187 AssertRCReturn(rc, rc);
9188
9189 Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
9190 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));
9191
9192 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
9193 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntrErrorCode, CPUMCTX2CORE(pMixedCtx),
9194 (RTGCPTR)pVmxTransient->uExitQualification);
9195
9196 Log4(("#PF: rc=%Rrc\n", rc));
9197 if (rc == VINF_SUCCESS)
9198 {
9199 /* Successfully synced shadow page tables or emulated an MMIO instruction. */
9200 /** @todo This isn't quite right; what if the guest does lgdt with some MMIO
9201 * memory? We don't update the whole state here... */
9202 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9203 | HM_CHANGED_VMX_GUEST_APIC_STATE;
9204 TRPMResetTrap(pVCpu);
9205 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
9206 return rc;
9207 }
9208 else if (rc == VINF_EM_RAW_GUEST_TRAP)
9209 {
9210 if (!pVmxTransient->fVectoringPF)
9211 {
9212 /* It's a guest page fault and needs to be reflected to the guest. */
9213 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
9214 TRPMResetTrap(pVCpu);
9215 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
9216 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
9217 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9218 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
9219 }
9220 else
9221 {
9222 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
9223 TRPMResetTrap(pVCpu);
9224 pVCpu->hm.s.Event.fPending = false; /* Clear the pending #PF to replace it with #DF. */
9225 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
9226 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
9227 }
9228
9229 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
9230 return VINF_SUCCESS;
9231 }
9232
9233 TRPMResetTrap(pVCpu);
9234 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
9235 return rc;
9236}
9237