VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@45475

Last change on this file since 45475 was 45475, checked in by vboxsync on 2013-04-10:

VMM/VMMR0/HM: comment and todo for problem earlier seen today with assert screwing up VMX state somehow.

1/* $Id: HMVMXR0.cpp 45475 2013-04-10 21:19:00Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/asm-amd64-x86.h>
23#include <iprt/thread.h>
24#include <iprt/string.h>
25
26#include "HMInternal.h"
27#include <VBox/vmm/vm.h>
28#include "HWVMXR0.h"
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/dbgf.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/tm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#ifdef DEBUG_ramshankar
38#define VBOX_ALWAYS_TRAP_ALL_EXCEPTIONS
39#endif
40
41/*******************************************************************************
42* Defined Constants And Macros *
43*******************************************************************************/
44#if defined(RT_ARCH_AMD64)
45# define VMX_IS_64BIT_HOST_MODE() (true)
46#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
47# define VMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
48#else
49# define VMX_IS_64BIT_HOST_MODE() (false)
50#endif
51
52#define VMX_SEL_UNUSABLE RT_BIT(16)
53
54/**
55 * Updated-guest-state flags.
56 */
57#define VMX_UPDATED_GUEST_FPU RT_BIT(0)
58#define VMX_UPDATED_GUEST_RIP RT_BIT(1)
59#define VMX_UPDATED_GUEST_RSP RT_BIT(2)
60#define VMX_UPDATED_GUEST_RFLAGS RT_BIT(3)
61#define VMX_UPDATED_GUEST_CR0 RT_BIT(4)
62#define VMX_UPDATED_GUEST_CR3 RT_BIT(5)
63#define VMX_UPDATED_GUEST_CR4 RT_BIT(6)
64#define VMX_UPDATED_GUEST_GDTR RT_BIT(7)
65#define VMX_UPDATED_GUEST_IDTR RT_BIT(8)
66#define VMX_UPDATED_GUEST_LDTR RT_BIT(9)
67#define VMX_UPDATED_GUEST_TR RT_BIT(10)
68#define VMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(11)
69#define VMX_UPDATED_GUEST_DEBUG RT_BIT(12)
70#define VMX_UPDATED_GUEST_FS_BASE_MSR RT_BIT(13)
71#define VMX_UPDATED_GUEST_GS_BASE_MSR RT_BIT(14)
72#define VMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(15)
73#define VMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(16)
74#define VMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(17)
75#define VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(18)
76#define VMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(19)
77#define VMX_UPDATED_GUEST_APIC_STATE RT_BIT(20)
78#define VMX_UPDATED_GUEST_ALL ( VMX_UPDATED_GUEST_FPU \
79 | VMX_UPDATED_GUEST_RIP \
80 | VMX_UPDATED_GUEST_RSP \
81 | VMX_UPDATED_GUEST_RFLAGS \
82 | VMX_UPDATED_GUEST_CR0 \
83 | VMX_UPDATED_GUEST_CR3 \
84 | VMX_UPDATED_GUEST_CR4 \
85 | VMX_UPDATED_GUEST_GDTR \
86 | VMX_UPDATED_GUEST_IDTR \
87 | VMX_UPDATED_GUEST_LDTR \
88 | VMX_UPDATED_GUEST_TR \
89 | VMX_UPDATED_GUEST_SEGMENT_REGS \
90 | VMX_UPDATED_GUEST_DEBUG \
91 | VMX_UPDATED_GUEST_FS_BASE_MSR \
92 | VMX_UPDATED_GUEST_GS_BASE_MSR \
93 | VMX_UPDATED_GUEST_SYSENTER_CS_MSR \
94 | VMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
95 | VMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
96 | VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
97 | VMX_UPDATED_GUEST_ACTIVITY_STATE \
98 | VMX_UPDATED_GUEST_APIC_STATE)
99
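/*
 * Illustrative sketch (not part of the original file): these bits are meant to be accumulated in a
 * per-VCPU "updated guest state" mask so VMCS fields are only read back on demand. Assuming a
 * uint32_t fUpdatedGuestState field in the per-VCPU HM state, a helper could look like this:
 */
#if 0
DECLINLINE(bool) hmR0VmxIsGuestStateUpdated(PVMCPU pVCpu, uint32_t fFlag)
{
    /* True if the given VMX_UPDATED_GUEST_* bit(s) have already been read back from the VMCS. */
    return RT_BOOL(pVCpu->hm.s.vmx.fUpdatedGuestState & fFlag);
}
#endif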
100/**
101 * Flags to skip redundant reads of some common VMCS fields.
102 */
103#define VMX_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
104#define VMX_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
105#define VMX_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
106#define VMX_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
107#define VMX_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
108#define VMX_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
109
110/*
111 * Exception bitmap mask for real-mode guests (real-on-v86). We need to intercept all exceptions manually (except #PF).
112 * #NM is also handled separately; see hmR0VmxLoadGuestControlRegs(). #PF need not be intercepted even in real-mode if
113 * we have Nested Paging support.
114 */
115#define VMX_REAL_MODE_XCPT_BITMAP ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
116 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
117 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
118 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
119 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
120 | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
121 | RT_BIT(X86_XCPT_XF))
122
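/*
 * Illustrative sketch (not part of the original file): for real-on-v86 guests the mask above would
 * typically be merged into the exception bitmap before it is written to the VMCS. The u32XcptBitmap
 * field is assumed here; the VMCS field name is the usual VMX exception-bitmap control.
 */
#if 0
    uint32_t u32XcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap | VMX_REAL_MODE_XCPT_BITMAP;
    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
    AssertRCReturn(rc, rc);
#endif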
123/* Maximum VM-instruction error number. */
124#define VMX_INSTR_ERROR_MAX 28
125
126/*******************************************************************************
127* Structures and Typedefs *
128*******************************************************************************/
129/**
130 * A state structure for holding miscellaneous information across
131 * VMX non-root operation and restored after the transition.
132 */
133typedef struct VMXTRANSIENT
134{
135 /** The host's rflags/eflags. */
136 RTCCUINTREG uEFlags;
137 /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
138 uint64_t u64LStarMsr;
139 /** The guest's TPR value used for TPR shadowing. */
140 uint8_t u8GuestTpr;
141
142 /** The basic VM-exit reason. */
143 uint16_t uExitReason;
144 /** The VM-exit exit qualification. */
145 RTGCUINTPTR uExitQualification;
146 /** The VM-exit interruption error code. */
147 uint32_t uExitIntrErrorCode;
148
149 /** The VM-exit interruption-information field. */
150 uint32_t uExitIntrInfo;
151 /** Whether the VM-entry failed or not. */
152 bool fVMEntryFailed;
153 /** The VM-exit instruction-length field. */
154 uint32_t cbInstr;
155
156 /** The VM-entry interruption-information field. */
157 uint32_t uEntryIntrInfo;
158 /** The VM-entry exception error code field. */
159 uint32_t uEntryXcptErrorCode;
160 /** The VM-entry instruction length field. */
161 uint32_t cbEntryInstr;
162
163 /** IDT-vectoring information field. */
164 uint32_t uIdtVectoringInfo;
165 /** IDT-vectoring error code. */
166 uint32_t uIdtVectoringErrorCode;
167
168 /** Mask of currently read VMCS fields; VMX_TRANSIENT_*. */
169 uint32_t fVmcsFieldsRead;
170 /** Whether TSC-offsetting should be setup before VM-entry. */
171 bool fUpdateTscOffsettingAndPreemptTimer;
172 /** Whether the VM-exit was caused by a page-fault during delivery of a
173 * contributory exception or a page-fault. */
174 bool fVectoringPF;
175} VMXTRANSIENT, *PVMXTRANSIENT;
176
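/*
 * Illustrative sketch (not part of the original file): the transient structure is typically
 * re-initialized before each VM-entry so stale cached VMCS reads are never reused.
 */
#if 0
    VMXTRANSIENT VmxTransient;
    RT_ZERO(VmxTransient);      /* Clears fVmcsFieldsRead so fresh VMREADs are done for the next VM-exit. */
#endif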
177/**
178 * MSR-bitmap read permissions.
179 */
180typedef enum VMXMSREXITREAD
181{
182 /** Reading this MSR causes a VM-exit. */
183 VMXMSREXIT_INTERCEPT_READ = 0xb,
184 /** Reading this MSR does not cause a VM-exit. */
185 VMXMSREXIT_PASSTHRU_READ
186} VMXMSREXITREAD;
187
188/**
189 * MSR-bitmap write permissions.
190 */
191typedef enum VMXMSREXITWRITE
192{
193 /** Writing to this MSR causes a VM-exit. */
194 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
195 /** Writing to this MSR does not cause a VM-exit. */
196 VMXMSREXIT_PASSTHRU_WRITE
197} VMXMSREXITWRITE;
198
199/*******************************************************************************
200* Internal Functions *
201*******************************************************************************/
202static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
203static int hmR0VmxInjectEventVmcs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo,
204 uint32_t cbInstr, uint32_t u32ErrCode);
205#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
206static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
207#endif
208#if 0
209DECLINLINE(int) hmR0VmxHandleExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
210 unsigned rcReason);
211#endif
212
213static DECLCALLBACK(int) hmR0VmxExitXcptNmi(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
214static DECLCALLBACK(int) hmR0VmxExitExtInt(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
215static DECLCALLBACK(int) hmR0VmxExitTripleFault(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
216static DECLCALLBACK(int) hmR0VmxExitInitSignal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
217static DECLCALLBACK(int) hmR0VmxExitSipi(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
218static DECLCALLBACK(int) hmR0VmxExitIoSmi(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
219static DECLCALLBACK(int) hmR0VmxExitSmi(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
220static DECLCALLBACK(int) hmR0VmxExitIntWindow(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
221static DECLCALLBACK(int) hmR0VmxExitNmiWindow(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
222static DECLCALLBACK(int) hmR0VmxExitTaskSwitch(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
223static DECLCALLBACK(int) hmR0VmxExitCpuid(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
224static DECLCALLBACK(int) hmR0VmxExitGetsec(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
225static DECLCALLBACK(int) hmR0VmxExitHlt(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
226static DECLCALLBACK(int) hmR0VmxExitInvd(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
227static DECLCALLBACK(int) hmR0VmxExitInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
228static DECLCALLBACK(int) hmR0VmxExitRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
229static DECLCALLBACK(int) hmR0VmxExitRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
230static DECLCALLBACK(int) hmR0VmxExitRsm(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
231static DECLCALLBACK(int) hmR0VmxExitInjectXcptUD(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
232static DECLCALLBACK(int) hmR0VmxExitMovCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
233static DECLCALLBACK(int) hmR0VmxExitMovDRx(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
234static DECLCALLBACK(int) hmR0VmxExitIoInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
235static DECLCALLBACK(int) hmR0VmxExitRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
236static DECLCALLBACK(int) hmR0VmxExitWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
237static DECLCALLBACK(int) hmR0VmxExitErrInvalidGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
238static DECLCALLBACK(int) hmR0VmxExitErrMsrLoad(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
239static DECLCALLBACK(int) hmR0VmxExitErrUndefined(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
240static DECLCALLBACK(int) hmR0VmxExitMwait(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
241static DECLCALLBACK(int) hmR0VmxExitMtf(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
242static DECLCALLBACK(int) hmR0VmxExitMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
243static DECLCALLBACK(int) hmR0VmxExitPause(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
244static DECLCALLBACK(int) hmR0VmxExitErrMachineCheck(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
245static DECLCALLBACK(int) hmR0VmxExitTprBelowThreshold(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
246static DECLCALLBACK(int) hmR0VmxExitApicAccess(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
247static DECLCALLBACK(int) hmR0VmxExitXdtrAccess(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
249static DECLCALLBACK(int) hmR0VmxExitEptViolation(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
250static DECLCALLBACK(int) hmR0VmxExitEptMisconfig(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
251static DECLCALLBACK(int) hmR0VmxExitRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
252static DECLCALLBACK(int) hmR0VmxExitPreemptionTimer(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
253static DECLCALLBACK(int) hmR0VmxExitWbinvd(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
254static DECLCALLBACK(int) hmR0VmxExitXsetbv(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
255static DECLCALLBACK(int) hmR0VmxExitRdrand(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
256static DECLCALLBACK(int) hmR0VmxExitInvpcid(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
257static DECLCALLBACK(int) hmR0VmxExitXcptNM(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
258static DECLCALLBACK(int) hmR0VmxExitXcptPF(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
259static DECLCALLBACK(int) hmR0VmxExitXcptMF(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
260static DECLCALLBACK(int) hmR0VmxExitXcptDB(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
261static DECLCALLBACK(int) hmR0VmxExitXcptBP(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
262static DECLCALLBACK(int) hmR0VmxExitXcptGP(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
263static DECLCALLBACK(int) hmR0VmxExitXcptGeneric(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
264
265/*******************************************************************************
266* Global Variables *
267*******************************************************************************/
268/** @todo Move this to hm_vmx.h. */
269/**
270 * VM-exit handler.
271 *
272 * @returns VBox status code.
273 * @param pVM Pointer to the VM.
274 * @param pVCpu Pointer to the VMCPU.
275 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
276 * out-of-sync. Make sure to update the required
277 * fields before using them.
278 * @param pVmxTransient Pointer to the VMX-transient structure.
279 */
280typedef DECLCALLBACK(int) FNVMEXITHANDLER(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
281/** Pointer to VM-exit handler. */
282typedef FNVMEXITHANDLER *PFNVMEXITHANDLER;
283
284static const PFNVMEXITHANDLER s_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
285{
286 /* 00 VMX_EXIT_XCPT_NMI */ hmR0VmxExitXcptNmi,
287 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
288 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
289 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
290 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
291 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
292 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
293 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
294 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
295 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
296 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
297 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
298 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
299 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
300 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
301 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
302 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
303 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
304 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitInjectXcptUD,
305 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitInjectXcptUD,
306 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitInjectXcptUD,
307 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitInjectXcptUD,
308 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitInjectXcptUD,
309 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitInjectXcptUD,
310 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitInjectXcptUD,
311 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitInjectXcptUD,
312 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitInjectXcptUD,
313 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitInjectXcptUD,
314 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
315 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
316 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
317 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
318 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
319 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
320 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
321 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
322 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
323 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
324 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
325 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
 326 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
 327 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
 328 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
329 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
330 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
331 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
332 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
333 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
334 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
335 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
336 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitInjectXcptUD,
337 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
338 /* 52 VMX_EXIT_PREEMPTION_TIMER */ hmR0VmxExitPreemptionTimer,
339 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitInjectXcptUD,
340 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
341 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
342 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
343 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
344 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
345 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitInjectXcptUD
346};
347
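/*
 * Illustrative dispatch sketch (not part of the original file): the table above is indexed by the
 * basic VM-exit reason. A caller would bounds-check the reason before dispatching, roughly like this:
 */
#if 0
DECLINLINE(int) hmR0VmxDispatchExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    AssertReturn(pVmxTransient->uExitReason <= VMX_EXIT_MAX, VERR_INTERNAL_ERROR);
    return s_apfnVMExitHandlers[pVmxTransient->uExitReason](pVM, pVCpu, pMixedCtx, pVmxTransient);
}
#endif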
348static const char* const s_apszVmxInstrErrors[VMX_INSTR_ERROR_MAX + 1] =
349{
350 /* 0 */ "(Not Used)",
351 /* 1 */ "VMCALL executed in VMX root operation.",
352 /* 2 */ "VMCLEAR with invalid physical address.",
353 /* 3 */ "VMCLEAR with VMXON pointer.",
354 /* 4 */ "VMLAUNCH with non-clear VMCS.",
355 /* 5 */ "VMRESUME with non-launched VMCS.",
356 /* 6 */ "VMRESUME after VMXOFF.",
357 /* 7 */ "VM entry with invalid control fields.",
358 /* 8 */ "VM entry with invalid host state fields.",
359 /* 9 */ "VMPTRLD with invalid physical address.",
360 /* 10 */ "VMPTRLD with VMXON pointer.",
361 /* 11 */ "VMPTRLD with incorrect revision identifier.",
362 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
363 /* 13 */ "VMWRITE to read-only VMCS component.",
364 /* 14 */ "(Not Used)",
365 /* 15 */ "VMXON executed in VMX root operation.",
366 /* 16 */ "VM entry with invalid executive-VMCS pointer.",
367 /* 17 */ "VM entry with non-launched executive VMCS.",
368 /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
369 /* 19 */ "VMCALL with non-clear VMCS.",
370 /* 20 */ "VMCALL with invalid VM-exit control fields.",
371 /* 21 */ "(Not Used)",
372 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
373 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
374 /* 24 */ "VMCALL with invalid SMM-monitor features.",
375 /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
376 /* 26 */ "VM entry with events blocked by MOV SS.",
377 /* 27 */ "(Not Used)",
378 /* 28 */ "Invalid operand to INVEPT/INVVPID."
379};
380
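/*
 * Illustrative sketch (not part of the original file): a logging helper would clamp the
 * VM-instruction error before indexing the table above, e.g.:
 */
#if 0
DECLINLINE(const char *) hmR0VmxGetInstrErrorDesc(uint32_t uInstrError)
{
    return s_apszVmxInstrErrors[RT_MIN(uInstrError, VMX_INSTR_ERROR_MAX)];
}
#endif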
381
382/**
383 * Updates the VM's last error record. If there was a VMX instruction error,
384 * reads the error data from the VMCS and updates VCPU's last error record as
385 * well.
386 *
387 * @param pVM Pointer to the VM.
388 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
389 * VERR_VMX_UNABLE_TO_START_VM or
390 * VERR_VMX_INVALID_VMCS_FIELD).
391 * @param rc The error code.
392 */
393static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
394{
395 AssertPtr(pVM);
396 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
397 || rc == VERR_VMX_UNABLE_TO_START_VM)
398 {
399 AssertPtrReturnVoid(pVCpu);
400 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
401 }
402 pVM->hm.s.lLastError = rc;
403}
404
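/*
 * Illustrative usage sketch (not part of the original file): a failed VM-entry would typically be
 * recorded like this so ring-3 can later inspect the VM-instruction error (rcVMRun is hypothetical):
 */
#if 0
    if (RT_FAILURE(rcVMRun))
        hmR0VmxUpdateErrorRecord(pVM, pVCpu, VERR_VMX_UNABLE_TO_START_VM);
#endif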
405
406/**
407 * Reads the VM-entry interruption-information field from the VMCS into the VMX
408 * transient structure.
409 *
410 * @returns VBox status code.
411 * @param pVmxTransient Pointer to the VMX transient structure.
412 *
413 * @remarks No-long-jump zone!!!
414 */
415DECLINLINE(int) hmR0VmxReadEntryIntrInfoVmcs(PVMXTRANSIENT pVmxTransient)
416{
417 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntrInfo);
418 AssertRCReturn(rc, rc);
419 return VINF_SUCCESS;
420}
421
422
423/**
424 * Reads the VM-entry exception error code field from the VMCS into
425 * the VMX transient structure.
426 *
427 * @returns VBox status code.
428 * @param pVmxTransient Pointer to the VMX transient structure.
429 *
430 * @remarks No-long-jump zone!!!
431 */
432DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
433{
434 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
435 AssertRCReturn(rc, rc);
436 return VINF_SUCCESS;
437}
438
439
440/**
441 * Reads the VM-entry instruction length field from the VMCS into
442 * the VMX transient structure.
443 *
444 * @returns VBox status code.
445 * @param pVCpu Pointer to the VMCPU.
446 * @param pVmxTransient Pointer to the VMX transient structure.
447 *
448 * @remarks No-long-jump zone!!!
449 */
450DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
451{
452 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
453 AssertRCReturn(rc, rc);
454 return VINF_SUCCESS;
455}
456
457
458/**
459 * Reads the VM-exit interruption-information field from the VMCS into the VMX
460 * transient structure.
461 *
462 * @returns VBox status code.
463 * @param pVCpu Pointer to the VMCPU.
464 * @param pVmxTransient Pointer to the VMX transient structure.
465 */
466DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
467{
468 if (!(pVmxTransient->fVmcsFieldsRead & VMX_TRANSIENT_EXIT_INTERRUPTION_INFO))
469 {
470 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
471 AssertRCReturn(rc, rc);
472 pVmxTransient->fVmcsFieldsRead |= VMX_TRANSIENT_EXIT_INTERRUPTION_INFO;
473 }
474 return VINF_SUCCESS;
475}
476
477
478/**
479 * Reads the VM-exit interruption error code from the VMCS into the VMX
480 * transient structure.
481 *
482 * @returns VBox status code.
483 * @param pVCpu Pointer to the VMCPU.
484 * @param pVmxTransient Pointer to the VMX transient structure.
485 */
486DECLINLINE(int) hmR0VmxReadExitIntrErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
487{
488 if (!(pVmxTransient->fVmcsFieldsRead & VMX_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
489 {
490 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntrErrorCode);
491 AssertRCReturn(rc, rc);
492 pVmxTransient->fVmcsFieldsRead |= VMX_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
493 }
494 return VINF_SUCCESS;
495}
496
497
498/**
499 * Reads the VM-exit instruction length field from the VMCS into the VMX
500 * transient structure.
501 *
502 * @returns VBox status code.
503 * @param pVCpu Pointer to the VMCPU.
504 * @param pVmxTransient Pointer to the VMX transient structure.
505 */
506DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
507{
508 if (!(pVmxTransient->fVmcsFieldsRead & VMX_TRANSIENT_EXIT_INSTR_LEN))
509 {
510 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
511 AssertRCReturn(rc, rc);
512 pVmxTransient->fVmcsFieldsRead |= VMX_TRANSIENT_EXIT_INSTR_LEN;
513 }
514 return VINF_SUCCESS;
515}
516
517
518/**
519 * Reads the exit qualification from the VMCS into the VMX transient structure.
520 *
521 * @returns VBox status code.
522 * @param pVCpu Pointer to the VMCPU.
523 * @param pVmxTransient Pointer to the VMX transient structure.
524 */
525DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
526{
527 if (!(pVmxTransient->fVmcsFieldsRead & VMX_TRANSIENT_EXIT_QUALIFICATION))
528 {
529 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification);
530 AssertRCReturn(rc, rc);
531 pVmxTransient->fVmcsFieldsRead |= VMX_TRANSIENT_EXIT_QUALIFICATION;
532 }
533 return VINF_SUCCESS;
534}
535
536
537/**
538 * Reads the IDT-vectoring information field from the VMCS into the VMX
539 * transient structure.
540 *
541 * @returns VBox status code.
542 * @param pVmxTransient Pointer to the VMX transient structure.
543 *
544 * @remarks No-long-jump zone!!!
545 */
546DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
547{
548 if (!(pVmxTransient->fVmcsFieldsRead & VMX_TRANSIENT_IDT_VECTORING_INFO))
549 {
550 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
551 AssertRCReturn(rc, rc);
552 pVmxTransient->fVmcsFieldsRead |= VMX_TRANSIENT_IDT_VECTORING_INFO;
553 }
554 return VINF_SUCCESS;
555}
556
557
558/**
559 * Reads the IDT-vectoring error code from the VMCS into the VMX
560 * transient structure.
561 *
562 * @returns VBox status code.
563 * @param pVmxTransient Pointer to the VMX transient structure.
564 */
565DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
566{
567 if (!(pVmxTransient->fVmcsFieldsRead & VMX_TRANSIENT_IDT_VECTORING_ERROR_CODE))
568 {
569 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
570 AssertRCReturn(rc, rc);
571 pVmxTransient->fVmcsFieldsRead |= VMX_TRANSIENT_IDT_VECTORING_ERROR_CODE;
572 }
573 return VINF_SUCCESS;
574}
575
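/*
 * Illustrative usage sketch (not part of the original file): thanks to the fVmcsFieldsRead caching in
 * the helpers above, an exit handler can call them freely without issuing redundant VMREADs, e.g.:
 */
#if 0
    int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
    rc    |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
    AssertRCReturn(rc, rc);
#endif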
576
577/**
578 * Enters VMX root mode operation on the current CPU.
579 *
580 * @returns VBox status code.
581 * @param pVM Pointer to the VM (optional, can be NULL, after
582 * a resume).
583 * @param HCPhysCpuPage Physical address of the VMXON region.
584 * @param pvCpuPage Pointer to the VMXON region.
585 */
586DECLINLINE(int) hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
587{
588 AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
589 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
590
591 if (pVM)
592 {
593 /* Write the VMCS revision dword to the VMXON region. */
594 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
595 }
596
597 /* Disable interrupts. Interrupts handlers might, in theory, change CR4. */
598 RTCCUINTREG fFlags = ASMIntDisableFlags();
599
600 /* Enable the VMX bit in CR4 if necessary. */
601 RTCCUINTREG uCr4 = ASMGetCR4();
602 if (!(uCr4 & X86_CR4_VMXE))
603 ASMSetCR4(uCr4 | X86_CR4_VMXE);
604
605 /* Enter VMX root mode. */
606 int rc = VMXEnable(HCPhysCpuPage); /** @todo This would #GP(0) if we are already in VMX root mode... try skip it? */
607 if (RT_FAILURE(rc))
608 ASMSetCR4(uCr4);
609
610 /* Restore interrupts. */
611 ASMSetFlags(fFlags);
612 return rc;
613}
614
615
616/**
617 * Exits VMX root mode operation on the current CPU.
618 *
619 * @returns VBox status code.
620 */
621static int hmR0VmxLeaveRootMode(void)
622{
623 /* Disable interrupts. Interrupt handlers might, in theory, change CR4. */
624 RTCCUINTREG fFlags = ASMIntDisableFlags();
625 int rc = VINF_SUCCESS;
626
627 /* If we're for some reason not in VMX root mode, then don't leave it. */
628 if (ASMGetCR4() & X86_CR4_VMXE)
629 {
630 /* Exit VMX root mode and clear the VMX bit in CR4 */
631 VMXDisable();
632 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
633 }
634 else
635 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
636
637 /* Restore interrupts. */
638 ASMSetFlags(fFlags);
639 return rc;
640}
641
642
643/**
644 * Allocates and maps one physically contiguous page. The allocated page is
645 * zeroed out. (Used by various VT-x structures).
646 *
647 * @returns IPRT status code.
648 * @param pMemObj Pointer to the ring-0 memory object.
649 * @param ppVirt Where to store the virtual address of the
650 * allocation.
651 * @param pHCPhys Where to store the physical address of the
652 * allocation.
653 */
654DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
655{
656 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
657 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
658 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
659
660 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
661 if (RT_FAILURE(rc))
662 return rc;
663 *ppVirt = RTR0MemObjAddress(*pMemObj);
664 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
665 ASMMemZero32(*ppVirt, PAGE_SIZE);
666 return VINF_SUCCESS;
667}
668
669
670/**
671 * Frees and unmaps an allocated physical page.
672 *
673 * @param pMemObj Pointer to the ring-0 memory object.
674 * @param ppVirt Where the virtual address of the allocation is
675 * stored; re-initialized to 0 on return.
676 * @param pHCPhys Where the physical address of the allocation is
677 * stored; re-initialized to 0 on return.
678 */
679DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
680{
681 AssertPtr(pMemObj);
682 AssertPtr(ppVirt);
683 AssertPtr(pHCPhys);
684 if (*pMemObj != NIL_RTR0MEMOBJ)
685 {
686 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
687 AssertRC(rc);
688 *pMemObj = NIL_RTR0MEMOBJ;
689 *ppVirt = 0;
690 *pHCPhys = 0;
691 }
692}
693
694
695/**
696 * Worker function to free VT-x related structures.
697 *
698 * @returns IPRT status code.
699 * @param pVM Pointer to the VM.
700 */
701static void hmR0VmxStructsFree(PVM pVM)
702{
703 for (VMCPUID i = 0; i < pVM->cCpus; i++)
704 {
705 PVMCPU pVCpu = &pVM->aCpus[i];
706 AssertPtr(pVCpu);
707
708 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
709 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
710
711 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
712 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
713
714 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
715 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
716 }
717
718 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
719#ifdef VBOX_WITH_CRASHDUMP_MAGIC
720 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
721#endif
722}
723
724
725/**
726 * Worker function to allocate VT-x related VM structures.
727 *
728 * @returns IPRT status code.
729 * @param pVM Pointer to the VM.
730 */
731static int hmR0VmxStructsAlloc(PVM pVM)
732{
733 /*
734 * Initialize members up-front so we can cleanup properly on allocation failure.
735 */
736#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
737 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
738 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
739 pVM->hm.s.vmx.HCPhys##a_Name = 0;
740
741#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
742 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
743 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
744 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
745
746#ifdef VBOX_WITH_CRASHDUMP_MAGIC
747 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
748#endif
749 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
750
751 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
752 for (VMCPUID i = 0; i < pVM->cCpus; i++)
753 {
754 PVMCPU pVCpu = &pVM->aCpus[i];
755 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
756 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
757 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
758 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
759 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
760 }
761#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
762#undef VMXLOCAL_INIT_VM_MEMOBJ
763
764 /*
765 * Allocate all the VT-x structures.
766 */
767 int rc = VINF_SUCCESS;
768#ifdef VBOX_WITH_CRASHDUMP_MAGIC
769 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
770 if (RT_FAILURE(rc))
771 goto cleanup;
772 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
773 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);
774#endif
775
776 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
777 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
778 {
779 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
780 &pVM->hm.s.vmx.HCPhysApicAccess);
781 if (RT_FAILURE(rc))
782 goto cleanup;
783 }
784
785 /*
786 * Initialize per-VCPU VT-x structures.
787 */
788 for (VMCPUID i = 0; i < pVM->cCpus; i++)
789 {
790 PVMCPU pVCpu = &pVM->aCpus[i];
791 AssertPtr(pVCpu);
792
793 /* Allocate the VM control structure (VMCS). */
794 AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
795 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
796 if (RT_FAILURE(rc))
797 goto cleanup;
798
799 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
800 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
801 {
802 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
803 &pVCpu->hm.s.vmx.HCPhysVirtApic);
804 if (RT_FAILURE(rc))
805 goto cleanup;
806 }
807
808 /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
809 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
810 {
811 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
812 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
813 if (RT_FAILURE(rc))
814 goto cleanup;
815 memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
816 }
817
818 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
819 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
820 if (RT_FAILURE(rc))
821 goto cleanup;
822
823 /* Allocate the VM-exit MSR-load page for the host MSRs. */
824 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
825 if (RT_FAILURE(rc))
826 goto cleanup;
827 }
828
829 return VINF_SUCCESS;
830
831cleanup:
832 hmR0VmxStructsFree(pVM);
833 return rc;
834}
835
836
837/**
838 * Does global VT-x initialization (called during module initialization).
839 *
840 * @returns VBox status code.
841 */
842VMMR0DECL(int) VMXR0GlobalInit(void)
843{
844 /* Setup the main VM exit handlers. */
845 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(s_apfnVMExitHandlers));
846#ifdef DEBUG
847 for (unsigned i = 0; i < RT_ELEMENTS(s_apfnVMExitHandlers); i++)
848 Assert(s_apfnVMExitHandlers[i]);
849#endif
850 return VINF_SUCCESS;
851}
852
853
854/**
855 * Does global VT-x termination (called during module termination).
856 */
857VMMR0DECL(void) VMXR0GlobalTerm()
858{
859 /* Nothing to do currently. */
860}
861
862
863/**
864 * Sets up and activates VT-x on the current CPU.
865 *
866 * @returns VBox status code.
867 * @param pCpu Pointer to the global CPU info struct.
868 * @param pVM Pointer to the VM (can be NULL after a host resume
869 * operation).
870 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
871 * fEnabledByHost is true).
872 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
873 * @a fEnabledByHost is true).
874 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
875 * enable VT-x/AMD-V on the host.
876 */
877VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
878{
879 AssertReturn(pCpu, VERR_INVALID_PARAMETER);
880 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
881
882 if (!fEnabledByHost)
883 {
884 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
885 if (RT_FAILURE(rc))
886 return rc;
887 }
888
889 /*
890 * Flush all VPIDs (in case we or any other hypervisor have been using VPIDs) so that
891 * we can avoid an explicit flush while using new VPIDs. We would still need to flush
892 * each time while reusing a VPID after hitting the MaxASID limit once.
893 */
894 if ( pVM
895 && pVM->hm.s.vmx.fVpid
896 && (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS))
897 {
898 hmR0VmxFlushVpid(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
899 pCpu->fFlushAsidBeforeUse = false;
900 }
901 else
902 pCpu->fFlushAsidBeforeUse = true;
903
904 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
905 ++pCpu->cTlbFlushes;
906
907 return VINF_SUCCESS;
908}
909
910
911/**
912 * Deactivates VT-x on the current CPU.
913 *
914 * @returns VBox status code.
915 * @param pCpu Pointer to the global CPU info struct.
916 * @param pvCpuPage Pointer to the VMXON region.
917 * @param HCPhysCpuPage Physical address of the VMXON region.
918 */
919VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
920{
921 NOREF(pCpu);
922 NOREF(pvCpuPage);
923 NOREF(HCPhysCpuPage);
924
925 hmR0VmxLeaveRootMode();
926 return VINF_SUCCESS;
927}
928
929
930/**
931 * Sets the permission bits for the specified MSR in the MSR bitmap.
932 *
933 * @param pVCpu Pointer to the VMCPU.
934 * @param ulMSR The MSR value.
935 * @param enmRead Whether reading this MSR causes a VM-exit.
936 * @param enmWrite Whether writing this MSR causes a VM-exit.
937 */
938static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, unsigned ulMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
939{
940 unsigned ulBit;
941 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
942
943 /*
944 * Layout:
945 * 0x000 - 0x3ff - Low MSR read bits
946 * 0x400 - 0x7ff - High MSR read bits
947 * 0x800 - 0xbff - Low MSR write bits
948 * 0xc00 - 0xfff - High MSR write bits
949 */
950 if (ulMsr <= 0x00001FFF)
951 {
952 /* Pentium-compatible MSRs */
953 ulBit = ulMsr;
954 }
955 else if ( ulMsr >= 0xC0000000
956 && ulMsr <= 0xC0001FFF)
957 {
958 /* AMD Sixth Generation x86 Processor MSRs */
959 ulBit = (ulMsr - 0xC0000000);
960 pbMsrBitmap += 0x400;
961 }
962 else
963 {
964 AssertMsgFailed(("Invalid MSR %lx\n", ulMsr));
965 return;
966 }
967
968 Assert(ulBit <= 0x1fff);
969 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
970 ASMBitSet(pbMsrBitmap, ulBit);
971 else
972 ASMBitClear(pbMsrBitmap, ulBit);
973
974 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
975 ASMBitSet(pbMsrBitmap + 0x800, ulBit);
976 else
977 ASMBitClear(pbMsrBitmap + 0x800, ulBit);
978}
979
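/*
 * Illustrative usage sketch (not part of the original file): granting the guest direct access to the
 * SYSENTER MSRs would look roughly like this (the choice of MSRs here is only an example):
 */
#if 0
    hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,  VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
#endif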
980
981/**
982 * Flushes the TLB using EPT.
983 *
984 * @returns VBox status code.
985 * @param pVM Pointer to the VM.
986 * @param pVCpu Pointer to the VMCPU.
987 * @param enmFlush Type of flush.
988 */
989static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
990{
991 AssertPtr(pVM);
992 Assert(pVM->hm.s.fNestedPaging);
993
994 LogFlowFunc(("pVM=%p pVCpu=%p enmFlush=%d\n", pVM, pVCpu, enmFlush));
995
996 uint64_t descriptor[2];
997 descriptor[0] = pVCpu->hm.s.vmx.GCPhysEPTP;
998 descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
999
1000 int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
1001 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu->hm.s.vmx.GCPhysEPTP, rc));
1002 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1003}
1004
1005
1006/**
1007 * Flushes the TLB using VPID.
1008 *
1009 * @returns VBox status code.
1010 * @param pVM Pointer to the VM.
1011 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1012 * enmFlush).
1013 * @param enmFlush Type of flush.
1014 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1015 * on @a enmFlush).
1016 */
1017static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
1018{
1019 AssertPtr(pVM);
1020 Assert(pVM->hm.s.vmx.fVpid);
1021
1022 uint64_t descriptor[2];
1023 if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
1024 {
1025 descriptor[0] = 0;
1026 descriptor[1] = 0;
1027 }
1028 else
1029 {
1030 AssertPtr(pVCpu);
1031 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1032 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1033 descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1034 descriptor[1] = GCPtr;
1035 }
1036
1037 int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
1038 AssertMsg(rc == VINF_SUCCESS,
1039 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1040 if ( RT_SUCCESS(rc)
1041 && pVCpu)
1042 {
1043 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1044 }
1045}
1046
1047
1048/**
1049 * Invalidates a guest page by guest virtual address. Only relevant for
1050 * EPT/VPID, otherwise there is nothing really to invalidate.
1051 *
1052 * @returns VBox status code.
1053 * @param pVM Pointer to the VM.
1054 * @param pVCpu Pointer to the VMCPU.
1055 * @param GCVirt Guest virtual address of the page to invalidate.
1056 */
1057VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1058{
1059 AssertPtr(pVM);
1060 AssertPtr(pVCpu);
1061 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1062
1063 bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1064 if (!fFlushPending)
1065 {
1066 /*
1067 * We must invalidate the guest TLB entry in either case; we cannot ignore it even in the EPT case.
1068 * See @bugref{6043} and @bugref{6177}.
1069 *
1070 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*(), as this
1071 * function may be called in a loop with individual addresses.
1072 */
1073 if (pVM->hm.s.vmx.fVpid)
1074 {
1075 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1076 {
1077 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
1078 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1079 }
1080 else
1081 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1082 }
1083 else if (pVM->hm.s.fNestedPaging)
1084 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1085 }
1086
1087 return VINF_SUCCESS;
1088}
1089
1090
1091/**
1092 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1093 * otherwise there is nothing really to invalidate.
1094 *
1095 * @returns VBox status code.
1096 * @param pVM Pointer to the VM.
1097 * @param pVCpu Pointer to the VMCPU.
1098 * @param GCPhys Guest physical address of the page to invalidate.
1099 */
1100VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1101{
1102 LogFlowFunc(("%RGp\n", GCPhys));
1103
1104 /*
1105 * We cannot flush a page by guest-physical address: INVVPID takes only a linear address, while INVEPT
1106 * flushes entire EPT contexts rather than individual addresses. We set the force flag here and flush before
1107 * the next VM-entry in hmR0VmxFlushTLB*(). This function might be called in a loop.
1108 */
1109 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1110 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1111 return VINF_SUCCESS;
1112}
1113
1114
1115/**
1116 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1117 * case where neither EPT nor VPID is supported by the CPU.
1118 *
1119 * @param pVM Pointer to the VM.
1120 * @param pVCpu Pointer to the VMCPU.
1121 *
1122 * @remarks Called with interrupts disabled.
1123 */
1124static DECLCALLBACK(void) hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
1125{
1126 NOREF(pVM);
1127 AssertPtr(pVCpu);
1128 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1129 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1130
1131 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1132 AssertPtr(pCpu);
1133
1134 pVCpu->hm.s.TlbShootdown.cPages = 0;
1135 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1136 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1137 pVCpu->hm.s.fForceTLBFlush = false;
1138 return;
1139}
1140
1141
1142/**
1143 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1144 *
1145 * @param pVM Pointer to the VM.
1146 * @param pVCpu Pointer to the VMCPU.
1147 * @remarks All references to "ASID" in this function pertain to "VPID" in
1148 * Intel's nomenclature; the term is kept to avoid confusion in
1149 * comparisons, since the host-CPU copies are named "ASID".
1150 *
1151 * @remarks Called with interrupts disabled.
1152 */
1153static DECLCALLBACK(void) hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
1154{
1155 AssertPtr(pVM);
1156 AssertPtr(pVCpu);
1157 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1158 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1159 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1160
1161 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1162 AssertPtr(pCpu);
1163
1164 /*
1165 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1166 * This can happen both for start & resume due to long jumps back to ring-3.
1167 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1168 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1169 */
1170 bool fNewASID = false;
1171 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1172 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1173 {
1174 pVCpu->hm.s.fForceTLBFlush = true;
1175 fNewASID = true;
1176 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1177 }
1178
1179 /*
1180 * Check for explicit TLB shootdowns.
1181 */
1182 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1183 {
1184 pVCpu->hm.s.fForceTLBFlush = true;
1185 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1186 }
1187
1188 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1189 if (pVCpu->hm.s.fForceTLBFlush)
1190 {
1191 if (fNewASID)
1192 {
1193 ++pCpu->uCurrentAsid;
1194 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1195 {
1196 pCpu->uCurrentAsid = 1; /* start at 1; host uses 0 */
1197 pCpu->cTlbFlushes++;
1198 pCpu->fFlushAsidBeforeUse = true;
1199 }
1200
1201 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1202 if (pCpu->fFlushAsidBeforeUse)
1203 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1204 }
1205 else
1206 {
1207 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1208 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
1209 else
1210 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1211 }
1212
1213 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1214 pVCpu->hm.s.fForceTLBFlush = false;
1215 }
1216 else
1217 {
1218 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1219 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1220 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1221 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1222
1223 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1224 * not be executed. See hmQueueInvlPage() where it is commented
1225 * out. Support individual entry flushing someday. */
1226 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1227 {
1228 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1229
1230 /*
1231 * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
1232 * as supported by the CPU.
1233 */
1234 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1235 {
1236 for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1237 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1238 }
1239 else
1240 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1241 }
1242 else
1243 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1244 }
1245 pVCpu->hm.s.TlbShootdown.cPages = 0;
1246 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1247
1248 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1249 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1250 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1251 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1252 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1253 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1254
1255 /* Update VMCS with the VPID. */
1256 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1257 AssertRC(rc);
1258}
1259
1260
1261/**
1262 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
1263 *
1264 * @returns VBox status code.
1265 * @param pVM Pointer to the VM.
1266 * @param pVCpu Pointer to the VMCPU.
1267 *
1268 * @remarks Called with interrupts disabled.
1269 */
1270static DECLCALLBACK(void) hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
1271{
1272 AssertPtr(pVM);
1273 AssertPtr(pVCpu);
1274 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
1275 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
1276
1277 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1278 AssertPtr(pCpu);
1279
1280 /*
1281 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1282 * This can happen both for start & resume due to long jumps back to ring-3.
1283 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
1284 */
1285 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1286 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1287 {
1288 pVCpu->hm.s.fForceTLBFlush = true;
1289 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1290 }
1291
1292 /* Check for explicit TLB shootdown flushes. */
1293 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1294 {
1295 pVCpu->hm.s.fForceTLBFlush = true;
1296 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1297 }
1298
1299 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1300 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1301
1302 if (pVCpu->hm.s.fForceTLBFlush)
1303 {
1304 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1305 pVCpu->hm.s.fForceTLBFlush = false;
1306 }
1307 else
1308 {
1309 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1310 * not be executed. See hmQueueInvlPage() where it is commented
1311 * out. Support individual entry flushing someday. */
1312 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1313 {
1314 /* We cannot flush individual entries without VPID support. Flush using EPT. */
1315 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1316 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1317 }
1318 else
1319 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1320 }
1321
1322 pVCpu->hm.s.TlbShootdown.cPages = 0;
1323 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1324}
1325
1326
1327/**
1328 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
1329 *
1330 * @returns VBox status code.
1331 * @param pVM Pointer to the VM.
1332 * @param pVCpu Pointer to the VMCPU.
1333 *
1334 * @remarks Called with interrupts disabled.
1335 */
1336static DECLCALLBACK(void) hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
1337{
1338 AssertPtr(pVM);
1339 AssertPtr(pVCpu);
1340 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
1341 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
1342
1343 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1344
1345 /*
1346 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
1347 * This can happen both for start & resume due to long jumps back to ring-3.
1348 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1349 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1350 */
1351 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1352 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1353 {
1354 pVCpu->hm.s.fForceTLBFlush = true;
1355 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1356 }
1357
1358 /* Check for explicit TLB shootdown flushes. */
1359 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1360 {
1361 /*
1362 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
1363 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
1364 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
1365 */
1366 pVCpu->hm.s.fForceTLBFlush = true;
1367 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1368 }
1369
1370 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1371 if (pVCpu->hm.s.fForceTLBFlush)
1372 {
1373 ++pCpu->uCurrentAsid;
1374 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1375 {
1376 pCpu->uCurrentAsid = 1; /* start at 1; host uses 0 */
1377 pCpu->fFlushAsidBeforeUse = true;
1378 pCpu->cTlbFlushes++;
1379 }
1380
1381 pVCpu->hm.s.fForceTLBFlush = false;
1382 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1383 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1384 if (pCpu->fFlushAsidBeforeUse)
1385 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1386 }
1387 else
1388 {
1389 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1390 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1391 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1392 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1393
1394 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1395 * not be executed. See hmQueueInvlPage() where it is commented
1396 * out. Support individual entry flushing someday. */
1397 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1398 {
1399 /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
1400 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1401 {
1402 for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1403 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1404 }
1405 else
1406 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1407 }
1408 else
1409 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1410 }
1411
1412 pVCpu->hm.s.TlbShootdown.cPages = 0;
1413 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1414
1415 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1416 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1417 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1418 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1419 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1420 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1421
1422 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1423 AssertRC(rc);
1424}
1425
1426
1427/**
1428 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
1429 * TLB entries from the host TLB before VM-entry.
1430 *
1431 * @returns VBox status code.
1432 * @param pVM Pointer to the VM.
1433 */
1434static int hmR0VmxSetupTaggedTlb(PVM pVM)
1435{
1436 /*
1437 * Determine optimal flush type for nested paging.
1438 * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up
1439 * unrestricted guest execution (see hmR3InitFinalizeR0()).
1440 */
1441 if (pVM->hm.s.fNestedPaging)
1442 {
1443 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
1444 {
1445 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
1446 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
1447 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1448 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
1449 else
1450 {
1451 /* Shouldn't happen. EPT is supported but no suitable flush types are supported. */
1452 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1453 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1454 }
1455
1456 /* Make sure the write-back cacheable memory type for EPT is supported. */
1457 if (!(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
1458 {
1459 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
1460 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1461 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1462 }
1463 }
1464 else
1465 {
1466 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
1467 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1468 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1469 }
1470 }
1471
1472 /*
1473 * Determine optimal flush type for VPID.
1474 */
1475 if (pVM->hm.s.vmx.fVpid)
1476 {
1477 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
1478 {
1479 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1480 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
1481 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
1482 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
1483 else
1484 {
1485 /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
1486 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1487 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
1488 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1489 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
1490 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1491 pVM->hm.s.vmx.fVpid = false;
1492 }
1493 }
1494 else
1495 {
1496 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
1497 Log(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
1498 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1499 pVM->hm.s.vmx.fVpid = false;
1500 }
1501 }
1502
1503 /*
1504 * Setup the handler for flushing tagged-TLBs.
1505 */
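    /*
     * The selection below, restated as a table (purely for readability):
     *
     *   Nested paging (EPT) | VPID | pfnFlushTaggedTlb
     *   --------------------+------+----------------------------
     *          yes          | yes  | hmR0VmxFlushTaggedTlbBoth
     *          yes          | no   | hmR0VmxFlushTaggedTlbEpt
     *          no           | yes  | hmR0VmxFlushTaggedTlbVpid
     *          no           | no   | hmR0VmxFlushTaggedTlbNone
     */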
1506 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
1507 pVM->hm.s.vmx.pfnFlushTaggedTlb = hmR0VmxFlushTaggedTlbBoth;
1508 else if (pVM->hm.s.fNestedPaging)
1509 pVM->hm.s.vmx.pfnFlushTaggedTlb = hmR0VmxFlushTaggedTlbEpt;
1510 else if (pVM->hm.s.vmx.fVpid)
1511 pVM->hm.s.vmx.pfnFlushTaggedTlb = hmR0VmxFlushTaggedTlbVpid;
1512 else
1513 pVM->hm.s.vmx.pfnFlushTaggedTlb = hmR0VmxFlushTaggedTlbNone;
1514 return VINF_SUCCESS;
1515}
1516
1517
1518/**
1519 * Sets up pin-based VM-execution controls in the VMCS.
1520 *
1521 * @returns VBox status code.
1522 * @param pVM Pointer to the VM.
1523 * @param pVCpu Pointer to the VMCPU.
1524 */
1525static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
1526{
1527 AssertPtr(pVM);
1528 AssertPtr(pVCpu);
1529
1530 uint32_t val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; /* Bits set here must always be set. */
1531 uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; /* Bits cleared here must always be cleared. */
1532
1533 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT /* External interrupts cause VM-exits. */
1534 | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause VM-exits. */
1535 Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI));
1536
1537 /* Enable the VMX preemption timer. */
1538 if (pVM->hm.s.vmx.fUsePreemptTimer)
1539 {
1540 Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER);
1541 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER;
1542 }
1543
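    /*
     * Illustration of the check below (hypothetical MSR values): if the CPU's allowed-1 mask ("zap")
     * lacks the preemption-timer bit but we've set that bit in "val" above, (val & zap) drops the bit
     * and no longer equals "val", so we fail with VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO instead of
     * writing an invalid control combination into the VMCS.
     */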
1544 if ((val & zap) != val)
1545 {
1546 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1547 pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0, val, zap));
1548 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1549 }
1550
1551 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, val);
1552 AssertRCReturn(rc, rc);
1553
1554 /* Update VCPU with the currently set pin-based VM-execution controls. */
1555 pVCpu->hm.s.vmx.u32PinCtls = val;
1556 return rc;
1557}
1558
1559
1560/**
1561 * Sets up processor-based VM-execution controls in the VMCS.
1562 *
1563 * @returns VBox status code.
1564 * @param pVM Pointer to the VM.
1565 * @param pVCpu Pointer to the VMCPU.
1566 */
1567static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
1568{
1569 AssertPtr(pVM);
1570 AssertPtr(pVCpu);
1571
1572 int rc = VERR_INTERNAL_ERROR_5;
1573 uint32_t val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; /* Bits set here must be set in the VMCS. */
1574 uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1575
1576 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT /* HLT causes a VM-exit. */
1577 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
1578 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
1579 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
1580 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
1581 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
1582 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
1583
1584 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT later; make sure it isn't forced to be always set or always cleared. */
1585 if ( !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
1586 || (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT))
1587 {
1588 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT combo!"));
1589 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1590 }
1591
1592 /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
1593 if (!pVM->hm.s.fNestedPaging)
1594 {
1595 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
1596 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
1597 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
1598 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
1599 }
1600
1601 /* Use TPR shadowing if supported by the CPU. */
1602 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
1603 {
1604 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
1605 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
1606 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
1607 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
1608 AssertRCReturn(rc, rc);
1609
1610 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW; /* CR8 reads are satisfied from the Virtual-APIC page. */
1611 /* CR8 writes cause a VM-exit based on the TPR threshold. */
1612 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT));
1613 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT));
1614 }
1615 else
1616 {
1617 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
1618 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
1619 }
1620
1621 /* Use MSR-bitmaps if supported by the CPU. */
1622 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
1623 {
1624 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
1625
1626 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1627 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
1628 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1629 AssertRCReturn(rc, rc);
1630
1631 /*
1632 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
1633 * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).
1634 */
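        /*
         * Background (Intel spec, "MSR-Bitmap Address"): the 4K MSR bitmap consists of four 1K regions:
         * read-low (MSRs 00000000h..00001FFFh), read-high (C0000000h..C0001FFFh), write-low and
         * write-high; a clear bit means the corresponding access does not cause a VM-exit. The
         * VMXMSREXIT_PASSTHRU_* arguments below are thus expected to clear the relevant read/write bits
         * for each MSR (see hmR0VmxSetMsrPermission()).
         */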
1635 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1636 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1637 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1638 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1639 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1640 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1641 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1642 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1643 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1644 }
1645
1646 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
1647 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1648 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
1649
1650 if ((val & zap) != val)
1651 {
1652 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1653 pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0, val, zap));
1654 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1655 }
1656
1657 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, val);
1658 AssertRCReturn(rc, rc);
1659
1660 /* Update VCPU with the currently set processor-based VM-execution controls. */
1661 pVCpu->hm.s.vmx.u32ProcCtls = val;
1662
1663 /*
1664 * Secondary processor-based VM-execution controls.
1665 */
1666 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
1667 {
1668 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
1669 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1670
1671 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
1672 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
1673
1674 if (pVM->hm.s.fNestedPaging)
1675 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
1676 else
1677 {
1678 /*
1679 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
1680 * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT when INVPCID is executed by the guest.
1681 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
1682 */
1683 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
1684 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
1685 }
1686
1687 if (pVM->hm.s.vmx.fVpid)
1688 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
1689
1690 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1691 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
1692
1693 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
1694 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
1695 * done dynamically. */
1696 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
1697 {
1698 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
1699 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
1700 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
1701 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
1702 AssertRCReturn(rc, rc);
1703 }
1704
1705 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
1706 {
1707 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
1708 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
1709 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1710 }
1711
1712 if ((val & zap) != val)
1713 {
1714 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
1715 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0, val, zap));
1716 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1717 }
1718
1719 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, val);
1720 AssertRCReturn(rc, rc);
1721
1722 /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
1723 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
1724 }
1725
1726 return VINF_SUCCESS;
1727}
1728
1729
1730/**
1731 * Sets up miscellaneous (everything other than Pin & Processor-based
1732 * VM-execution) control fields in the VMCS.
1733 *
1734 * @returns VBox status code.
1735 * @param pVM Pointer to the VM.
1736 * @param pVCpu Pointer to the VMCPU.
1737 */
1738static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
1739{
1740 AssertPtr(pVM);
1741 AssertPtr(pVCpu);
1742
1743 int rc = VERR_GENERAL_FAILURE;
1744
1745 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs())*/
1746 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
1747
1748 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
1749
1750 /*
1751 * Set MASK & MATCH to 0. VMX checks whether (GuestPFErrCode & MASK) == MATCH; with both zero the comparison
1752 * always holds. A #PF then causes a VM-exit if the X86_XCPT_PF bit in the exception bitmap is set, and doesn't
1753 * if it is clear. We thus use the exception bitmap alone to control #PF intercepts rather than using both.
1754 */
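    /*
     * For reference (Intel spec, exception bitmap & #PF filtering): when the X86_XCPT_PF bit in the
     * exception bitmap is 1 a #PF causes a VM-exit iff (PFErrCode & MASK) == MATCH, and when the bit is
     * 0 it causes a VM-exit iff (PFErrCode & MASK) != MATCH. With MASK = MATCH = 0 the comparison is
     * always true, so the exception-bitmap bit alone decides whether #PFs exit.
     */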
1755 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
1756 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
1757
1758 /** @todo Explore possibility of using IO-bitmaps. */
1759 /* All I/O instructions (including string I/O) cause VM-exits. */
1760 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
1761 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
1762
1763 /* Setup MSR autoloading/autostoring. */
1764 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
1765 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
1766 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1767 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1768 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
1769 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
1770
1771 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
1772 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
1773 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
1774 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
1775
1776 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
1777 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, 0xffffffffffffffffULL);
1778
1779 /* Setup debug controls */
1780 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo think about this. */
1781 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0); /** @todo Intel spec. 26.6.3 think about this */
1782 AssertRCReturn(rc, rc);
1783 return rc;
1784}
1785
1786
1787/**
1788 * Sets up the initial exception bitmap in the VMCS based on static conditions
1789 * (i.e. conditions that cannot ever change at runtime).
1790 *
1791 * @returns VBox status code.
1792 * @param pVM Pointer to the VM.
1793 * @param pVCpu Pointer to the VMCPU.
1794 */
1795static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
1796{
1797 AssertPtr(pVM);
1798 AssertPtr(pVCpu);
1799
1800 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
1801
1802 uint32_t u32XcptBitmap = 0;
1803
1804 /* Without nested paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
1805 if (!pVM->hm.s.fNestedPaging)
1806 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
1807
1808 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
1809 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
1810 AssertRCReturn(rc, rc);
1811 return rc;
1812}
1813
1814
1815/**
1816 * Sets up the initial guest-state mask. The guest-state mask is consulted
1817 * before reading guest-state fields from the VMCS as VMREADs can be expensive
1818 * for the nested virtualization case (as it would cause a VM-exit).
1819 *
1820 * @param pVCpu Pointer to the VMCPU.
1821 */
1822static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
1823{
1824 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
1825 pVCpu->hm.s.vmx.fUpdatedGuestState = VMX_UPDATED_GUEST_ALL;
1826 return VINF_SUCCESS;
1827}
1828
1829
1830/**
1831 * Does per-VM VT-x initialization.
1832 *
1833 * @returns VBox status code.
1834 * @param pVM Pointer to the VM.
1835 */
1836VMMR0DECL(int) VMXR0InitVM(PVM pVM)
1837{
1838 LogFlowFunc(("pVM=%p\n", pVM));
1839
1840 int rc = hmR0VmxStructsAlloc(pVM);
1841 if (RT_FAILURE(rc))
1842 {
1843 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
1844 return rc;
1845 }
1846
1847 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1848 {
1849 PVMCPU pVCpu = &pVM->aCpus[i];
1850
1851 /* Current guest paging mode. */
1852 pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
1853 }
1854
1855 return VINF_SUCCESS;
1856}
1857
1858
1859/**
1860 * Does per-VM VT-x termination.
1861 *
1862 * @returns VBox status code.
1863 * @param pVM Pointer to the VM.
1864 */
1865VMMR0DECL(int) VMXR0TermVM(PVM pVM)
1866{
1867 LogFlowFunc(("pVM=%p\n", pVM));
1868
1869#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1870 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
1871 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
1872#endif
1873 hmR0VmxStructsFree(pVM);
1874 return VINF_SUCCESS;
1875}
1876
1877
1878/**
1879 * Sets up the VM for execution under VT-x.
1880 * This function is only called once per-VM during initialization.
1881 *
1882 * @returns VBox status code.
1883 * @param pVM Pointer to the VM.
1884 */
1885VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
1886{
1887 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
1888 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1889
1890 LogFlowFunc(("pVM=%p\n", pVM));
1891
1892 /*
1893 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
1894 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
1895 */
1896 /* -XXX- change hmR3InitFinalizeR0() to fail if pRealModeTSS alloc fails. */
1897 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
1898 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
1899 || !pVM->hm.s.vmx.pRealModeTSS))
1900 {
1901 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
1902 return VERR_INTERNAL_ERROR;
1903 }
1904
1905 /* Initialize these always, see hmR3InitFinalizeR0().*/
1906 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NONE;
1907 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
1908
1909 /* Setup the tagged-TLB flush handlers. */
1910 int rc = hmR0VmxSetupTaggedTlb(pVM);
1911 if (RT_FAILURE(rc))
1912 {
1913 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
1914 return rc;
1915 }
1916
1917 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1918 {
1919 PVMCPU pVCpu = &pVM->aCpus[i];
1920 AssertPtr(pVCpu);
1921 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
1922
1923 /* Set revision dword at the beginning of the VMCS structure. */
1924 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
1925
1926 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
1927 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
1928 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
1929 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
1930
1931 /* Load this VMCS as the current VMCS. */
1932 rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
1933 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
1934 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
1935
1936 /* Setup the pin-based VM-execution controls. */
1937 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
1938 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
1939 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
1940
1941 /* Setup the processor-based VM-execution controls. */
1942 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
1943 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
1944 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
1945
1946 /* Setup the rest (miscellaneous) VM-execution controls. */
1947 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
1948 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
1949 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
1950
1951 /* Setup the initial exception bitmap. */
1952 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
1953 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
1954 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
1955
1956 /* Setup the initial guest-state mask. */
1957 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
1958 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
1959 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
1960
1961#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1962 /* Setup the VMCS read cache as we queue up certain VMWRITEs that can only be done in 64-bit mode for 64-bit guests. */
1963 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
1964 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
1965 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
1966#endif
1967
1968 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
1969 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
1970 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
1971 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
1972
1973 /* Update the last error record for this VCPU. */
1974 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
1975 }
1976
1977 return VINF_SUCCESS;
1978}
1979
1980
1981/**
1982 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
1983 * the VMCS.
1984 *
1985 * @returns VBox status code.
1986 * @param pVM Pointer to the VM.
1987 * @param pVCpu Pointer to the VMCPU.
1988 */
1989DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
1990{
1991 RTCCUINTREG uReg = ASMGetCR0();
1992 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
1993
1994#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1995 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
1996 if (VMX_IS_64BIT_HOST_MODE())
1997 {
1998 uint64_t uReg = hmR0Get64bitCR3();
1999 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uReg);
2000 }
2001 else
2002#endif
2003 {
2004 uReg = ASMGetCR3();
2005 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2006 }
2007
2008 uReg = ASMGetCR4();
2009 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2010 AssertRCReturn(rc, rc);
2011 return rc;
2012}
2013
2014
2015/**
2016 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2017 * the host-state area in the VMCS.
2018 *
2019 * @returns VBox status code.
2020 * @param pVM Pointer to the VM.
2021 * @param pVCpu Pointer to the VMCPU.
2022 */
2023DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2024{
2025 int rc = VERR_INTERNAL_ERROR_5;
2026 RTSEL uSelCS = 0;
2027 RTSEL uSelSS = 0;
2028 RTSEL uSelDS = 0;
2029 RTSEL uSelES = 0;
2030 RTSEL uSelFS = 0;
2031 RTSEL uSelGS = 0;
2032 RTSEL uSelTR = 0;
2033
2034 /*
2035 * Host Selector registers.
2036 */
2037#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2038 if (VMX_IS_64BIT_HOST_MODE())
2039 {
2040 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2041 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2042 }
2043 else
2044 {
2045 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2046 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2047 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2048 }
2049#else
2050 uSelCS = ASMGetCS();
2051 uSelSS = ASMGetSS();
2052#endif
2053
2054 /* Note: VT-x is picky about the RPL of the selectors here; we'll restore them manually. */
2055 /** @todo Verify if we have any platform that actually run with DS or ES with
2056 * RPL != 0 in kernel space. */
2057 uSelDS = 0;
2058 uSelES = 0;
2059 uSelFS = 0;
2060 uSelGS = 0;
2061 uSelTR = ASMGetTR();
2062
2063 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2064 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2065 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2066 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2067 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2068 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2069 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2070 Assert(uSelCS != 0);
2071 Assert(uSelTR != 0);
2072
2073 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2074#if 0
2075 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE))
2076 Assert(uSelSS != 0);
2077#endif
2078
2079 /* Write these host selector fields into the host-state area in the VMCS. */
2080 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);
2081 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);
2082 /* Avoid the VMWRITEs as we set the following segments to 0 and the VMCS fields are already 0 (since g_HvmR0 is static) */
2083#if 0
2084 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);
2085 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);
2086 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS);
2087 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS);
2088#endif
2089 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR);
2090 AssertRCReturn(rc, rc);
2091
2092 /*
2093 * Host GDTR and IDTR.
2094 */
2095 /** @todo Despite VT-x -not- restoring the limits on GDTR and IDTR it should
2096 * be safe to -not- save and restore GDTR and IDTR in the assembly
2097 * code and just do it here and don't care if the limits are zapped on
2098 * VM-exit. */
2099 RTGDTR Gdtr;
2100 RT_ZERO(Gdtr);
2101#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2102 if (VMX_IS_64BIT_HOST_MODE())
2103 {
2104 X86XDTR64 Gdtr64;
2105 X86XDTR64 Idtr64;
2106 hmR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
2107 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr);
2108 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr);
2109 Gdtr.cbGdt = Gdtr64.cb;
2110 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
2111 }
2112 else
2113#endif
2114 {
2115 RTIDTR Idtr;
2116 ASMGetGDTR(&Gdtr);
2117 ASMGetIDTR(&Idtr);
2118 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
2119 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
2120 }
2121 AssertRCReturn(rc, rc);
2122
2123 /*
2124 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
2125 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases.
2126 */
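    /* Worked example (illustrative): uSelTR = 0x0040 has TI = 0 and RPL = 0, so the masking keeps 0x40,
       i.e. the byte offset of GDT descriptor #8 (0x40 / 8); a Gdtr.cbGdt smaller than that would be
       rejected by the check below. */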
2127 if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
2128 {
2129 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit.TR=%RTsel Gdtr.cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
2130 return VERR_VMX_INVALID_HOST_STATE;
2131 }
2132
2133 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
2134#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2135 if (VMX_IS_64BIT_HOST_MODE())
2136 {
2137 /* We need the 64-bit TR base for hybrid darwin. */
2138 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
2139 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, u64TRBase);
2140 }
2141 else
2142#endif
2143 {
2144 uintptr_t uTRBase;
2145#if HC_ARCH_BITS == 64
2146 uTRBase = X86DESC64_BASE(pDesc);
2147#else
2148 uTRBase = X86DESC_BASE(pDesc);
2149#endif
2150 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
2151 }
2152 AssertRCReturn(rc, rc);
2153
2154 /*
2155 * Host FS base and GS base.
2156 * For 32-bit hosts the bases are taken care of by the assembly code, where we push/pop FS and GS.
2157 * For 64-bit hosts, the FS and GS base MSRs come into play.
2158 */
2159#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2160 if (VMX_IS_64BIT_HOST_MODE())
2161 {
2162 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
2163 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
2164 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_FS_BASE, u64FSBase);
2165 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_GS_BASE, u64GSBase);
2166 AssertRCReturn(rc, rc);
2167 }
2168#endif
2169 return rc;
2170}
2171
2172
2173/**
2174 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
2175 * host-state area of the VMCS. These MSRs will be automatically restored on
2176 * the host after every successful VM-exit.
2177 *
2178 * @returns VBox status code.
2179 * @param pVM Pointer to the VM.
2180 * @param pVCpu Pointer to the VMCPU.
2181 */
2182DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
2183{
2184 AssertPtr(pVCpu);
2185 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
2186
2187 PVMXMSR pHostMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
2188 unsigned idxHostMsr = 0;
2189 uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
2190
2191 if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2192 {
2193 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2194 pHostMsr->u32Reserved = 0;
2195#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2196 if (CPUMIsGuestInLongMode(pVCpu))
2197 {
2198 /* Must match the EFER value in our 64 bits switcher. */
2199 pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
2200 }
2201 else
2202#endif
2203 pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
2204 pHostMsr++; idxHostMsr++;
2205 }
2206
2207#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2208 if (VMX_IS_64BIT_HOST_MODE())
2209 {
2210 pHostMsr->u32IndexMSR = MSR_K6_STAR;
2211 pHostMsr->u32Reserved = 0;
2212 pHostMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
2213 pHostMsr++; idxHostMsr++;
2214 pHostMsr->u32IndexMSR = MSR_K8_LSTAR;
2215 pHostMsr->u32Reserved = 0;
2216 pHostMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64 bits mode syscall rip */
2217 pHostMsr++; idxHostMsr++;
2218 pHostMsr->u32IndexMSR = MSR_K8_SF_MASK;
2219 pHostMsr->u32Reserved = 0;
2220 pHostMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
2221 pHostMsr++; idxHostMsr++;
2222 /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
2223#if 0
2224 pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
2225 pHostMsr->u32Reserved = 0;
2226 pHostMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
2227 pHostMsr++; idxHostMsr++;
2228#endif
2229 }
2230#endif
2231
2232 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
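    /* Background (Intel spec, VMX MISC MSR): bits 27:25 encode a value N such that 512 * (N + 1) is the
       recommended maximum number of MSRs in the auto load/store areas; MSR_IA32_VMX_MISC_MAX_MSR() is
       presumably extracting exactly that. */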
2233 if (RT_UNLIKELY(idxHostMsr > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
2234 {
2235 LogRel(("idxHostMsr=%u Cpu=%u\n", idxHostMsr, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
2236 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2237 }
2238
2239 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, idxHostMsr);
2240
2241 /*
2242 * Host Sysenter MSRs.
2243 */
2244 rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
2245#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2246 if (VMX_IS_64BIT_HOST_MODE())
2247 {
2248 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2249 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2250 }
2251 else
2252 {
2253 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2254 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2255 }
2256#elif HC_ARCH_BITS == 32
2257 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2258 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2259#else
2260 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2261 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2262#endif
2263 AssertRCReturn(rc, rc);
2264
2265 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
2266 * hmR0VmxSetupExitCtls() !! */
2267 return rc;
2268}
2269
2270
2271/**
2272 * Sets up VM-entry controls in the VMCS. These controls determine, among other things,
2273 * what guest state is loaded on VM-entry; e.g. "load debug controls". See Intel spec.
2274 * 24.8.1 "VM-entry controls".
2275 *
2276 * @returns VBox status code.
2277 * @param pVM Pointer to the VM.
2278 * @param pVCpu Pointer to the VMCPU.
2279 * @param pCtx Pointer to the guest-CPU context.
2280 *
2281 * @remarks No-long-jump zone!!!
2282 */
2283DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2284{
2285 int rc = VINF_SUCCESS;
2286 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
2287 {
2288 uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; /* Bits set here must be set in the VMCS. */
2289 uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2290
2291 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
2292 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
2293
2294 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
2295 if (CPUMIsGuestInLongModeEx(pCtx))
2296 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST;
2297 else
2298 Assert(!(val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST));
2299
2300 /*
2301 * The following should not be set (since we're not in SMM mode):
2302 * - VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM
2303 * - VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON
2304 */
2305
2306 /** @todo VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR,
2307 * VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR,
2308 * VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR */
2309
2310 if ((val & zap) != val)
2311 {
2312 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2313 pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0, val, zap));
2314 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2315 }
2316
2317 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, val);
2318 AssertRCReturn(rc, rc);
2319
2320 /* Update VCPU with the currently set VM-entry controls. */
2321 pVCpu->hm.s.vmx.u32EntryCtls = val;
2322 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
2323 }
2324 return rc;
2325}
2326
2327
2328/**
2329 * Sets up the VM-exit controls in the VMCS.
2330 *
2331 * @returns VBox status code.
2332 * @param pVM Pointer to the VM.
2333 * @param pVCpu Pointer to the VMCPU.
2334 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2335 * out-of-sync. Make sure to update the required fields
2336 * before using them.
2337 *
2338 * @remarks requires EFER.
2339 */
2340DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2341{
2342 int rc = VINF_SUCCESS;
2343 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
2344 {
2345 uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; /* Bits set here must be set in the VMCS. */
2346 uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2347
2348 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
2349 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
2350
2351 /* Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary. */
2352#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2353 if (VMX_IS_64BIT_HOST_MODE())
2354 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE;
2355 else
2356 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
2357#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
2358 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2359 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE; /* The switcher goes to long mode. */
2360 else
2361 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
2362#endif
2363
2364 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
2365 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT));
2366
2367 /** @todo VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_PERF_MSR,
2368 * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR,
2369 * VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR,
2370 * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR,
2371 * VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR. */
2372
2373 if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
2374 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER;
2375
2376 if ((val & zap) != val)
2377 {
2378 LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2379 pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0, val, zap));
2380 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2381 }
2382
2383 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_CONTROLS, val);
2384 AssertRCReturn(rc, rc);
2385
2386 /* Update VCPU with the currently set VM-exit controls. */
2387 pVCpu->hm.s.vmx.u32ExitCtls = val;
2388 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
2389 }
2390 return rc;
2391}
2392
2393
2394/**
2395 * Loads the guest APIC and related state.
2396 *
2397 * @returns VBox status code.
2398 * @param pVM Pointer to the VM.
2399 * @param pVCpu Pointer to the VMCPU.
2400 * @param pCtx Pointer to the guest-CPU context.
2401 */
2402DECLINLINE(int) hmR0VmxLoadGuestApicState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2403{
2404 int rc = VINF_SUCCESS;
2405 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
2406 {
2407 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
2408 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
2409 {
2410 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2411
2412 bool fPendingIntr = false;
2413 uint8_t u8GuestTpr = 0;
2414 rc = PDMApicGetTPR(pVCpu, &u8GuestTpr, &fPendingIntr);
2415 AssertRCReturn(rc, rc);
2416
2417 /*
2418 * If there are external interrupts pending but masked by the TPR value, apply the threshold so that if the guest
2419 * lowers the TPR, it would cause a VM-exit and we can deliver the interrupt.
2420 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
2421 * the interrupt when we VM-exit for other reasons.
2422 */
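            /*
             * Worked example (illustrative): u8GuestTpr = 0x60 with an interrupt pending yields a
             * threshold of 6 below. Per the Intel spec, a "TPR below threshold" VM-exit is taken once
             * the guest lowers bits 7:4 of its TPR beneath 6 (e.g. by writing 0x30), at which point we
             * get the chance to inject the pending interrupt.
             */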
2423 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8GuestTpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
2424 /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
2425 uint32_t u32TprThreshold = fPendingIntr ? (u8GuestTpr >> 4) : 0;
2426 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
2427
2428 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
2429 AssertRCReturn(rc, rc);
2430
2431 /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
2432 if (pVM->hm.s.fTPRPatchingActive)
2433 {
2434 Assert(!CPUMIsGuestInLongModeEx(pCtx)); /* EFER always up-to-date. */
2435 pCtx->msrLSTAR = u8GuestTpr;
2436 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
2437 {
2438 /* If there are interrupts pending, intercept CR8 writes, otherwise don't intercept CR8 reads or writes. */
2439 if (fPendingIntr)
2440 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
2441 else
2442 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2443 }
2444 }
2445 }
2446
2447 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
2448 }
2449 return rc;
2450}
2451
2452
2453/**
2454 * Loads the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
2455 * into the guest-state area in the VMCS.
2456 *
2457 * @param pVM Pointer to the VM.
2458 * @param pVCpu Pointer to the VMCPU.
2459 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2460 * out-of-sync. Make sure to update the required fields
2461 * before using them.
2462 *
2463 * @remarks No-long-jump zone!!!
2464 */
2465DECLINLINE(void) hmR0VmxLoadGuestIntrState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2466{
2467 /*
2468 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
2469 * inhibit interrupts or clear any existing interrupt-inhibition.
2470 */
2471 uint32_t uIntrState = 0;
2472 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2473 {
2474 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
2475 AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (VMX_UPDATED_GUEST_RIP | VMX_UPDATED_GUEST_RFLAGS)),
2476 ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
2477 if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2478 {
2479 /*
2480 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
2481 * VT-x the flag's condition to be cleared is met and thus the cleared state is correct.
2482 * hmR0VmxInjectPendingInterrupt() relies on us clearing this flag here.
2483 */
2484 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2485 }
2486 else if (pMixedCtx->eflags.Bits.u1IF)
2487 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
2488 else
2489 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
2490 }
2491
2492 Assert(!(uIntrState & 0xfffffff0)); /* Bits 31:4 MBZ. */
2493 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
2494 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
2495 AssertRC(rc);
2496}
2497
2498
2499/**
2500 * Loads the guest's RIP into the guest-state area in the VMCS.
2501 *
2502 * @returns VBox status code.
2503 * @param pVM Pointer to the VM.
2504 * @param pVCpu Pointer to the VMCPU.
2505 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2506 * out-of-sync. Make sure to update the required fields
2507 * before using them.
2508 *
2509 * @remarks No-long-jump zone!!!
2510 */
2511DECLINLINE(int) hmR0VmxLoadGuestRip(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2512{
2513 int rc = VINF_SUCCESS;
2514 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
2515 {
2516 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
2517 AssertRCReturn(rc, rc);
2518 Log(("VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
2519 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
2520 }
2521 return rc;
2522}
2523
2524
2525/**
2526 * Loads the guest's RSP into the guest-state area in the VMCS.
2527 *
2528 * @returns VBox status code.
2529 * @param pVM Pointer to the VM.
2530 * @param pVCpu Pointer to the VMCPU.
2531 * @param pMixedCtx Pointer to the guest-CPU context.
2532 *
2533 * @remarks No-long-jump zone!!!
2534 */
2535DECLINLINE(int) hmR0VmxLoadGuestRsp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2536{
2537 int rc = VINF_SUCCESS;
2538 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
2539 {
2540 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
2541 AssertRCReturn(rc, rc);
2542 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
2543 }
2544 return rc;
2545}
2546
2547
2548/**
2549 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
2550 *
2551 * @returns VBox status code.
2552 * @param pVM Pointer to the VM.
2553 * @param pVCpu Pointer to the VMCPU.
2554 * @param pCtx Pointer to the guest-CPU context.
2555 *
2556 * @remarks No-long-jump zone!!!
2557 */
2558DECLINLINE(int) hmR0VmxLoadGuestRflags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2559{
2560 int rc = VINF_SUCCESS;
2561 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
2562 {
2563 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
2564 Let us assert it as such and use native-width VMWRITE. */
2565 X86RFLAGS uRFlags = pCtx->rflags;
2566 Assert(uRFlags.u64 >> 32 == 0);
2567 uRFlags.u64 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
2568 uRFlags.u64 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
2569
2570 /*
2571 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
2572 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
2573 */
2574 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2575 {
2576 Assert(pVM->hm.s.vmx.pRealModeTSS);
2577 Assert(PDMVmmDevHeapIsEnabled(pVM));
2578 pVCpu->hm.s.vmx.RealMode.eflags.u32 = uRFlags.u64; /* Save the original eflags of the real-mode guest. */
2579 uRFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
2580 uRFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
2581 }
2582
2583 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RFLAGS, uRFlags.u64);
2584 AssertRCReturn(rc, rc);
2585
2586 Log(("VMX_VMCS_GUEST_RFLAGS=%#RX64\n", uRFlags.u64));
2587 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
2588 }
2589 return rc;
2590}
2591
2592
2593/**
2594 * Loads the guest's general purpose registers (GPRs) - RIP, RSP and RFLAGS
2595 * into the guest-state area in the VMCS. The remaining GPRs are handled in the
2596 * assembly code.
2597 *
2598 * @returns VBox status code.
2599 * @param pVM Pointer to the VM.
2600 * @param pVCpu Pointer to the VMCPU.
2601 * @param pCtx Pointer to the guest-CPU context.
2602 *
2603 * @remarks No-long-jump zone!!!
2604 */
2605DECLINLINE(int) hmR0VmxLoadGuestGprs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2606{
2607 LogFlowFunc(("pVM=%p pVCpu=%p pCtx=%p\n", pVM, pVCpu, pCtx));
2608 int rc = hmR0VmxLoadGuestRip(pVM, pVCpu, pCtx);
2609 rc |= hmR0VmxLoadGuestRsp(pVM, pVCpu, pCtx);
2610 rc |= hmR0VmxLoadGuestRflags(pVM, pVCpu, pCtx);
2611 return rc;
2612}
2613
2614
2615/**
2616 * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
2617 * in the VMCS.
2618 *
2619 * @returns VBox status code.
2620 * @param pVM Pointer to the VM.
2621 * @param pVCpu Pointer to the VMCPU.
2622 * @param pCtx Pointer to the guest-CPU context.
2623 *
2624 * @remarks No-long-jump zone!!!
2625 */
2626DECLINLINE(int) hmR0VmxLoadGuestControlRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2627{
2628 int rc = VINF_SUCCESS;
2629
2630 /*
2631 * Guest CR0.
2632 * Guest FPU.
2633 */
2634 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
2635 {
2636 uint64_t u64GuestCR0 = pCtx->cr0;
2637
2638 /* The guest's view (read access) of its CR0 is unblemished. */
2639 rc = VMXWriteVmcsGstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64GuestCR0);
2640 AssertRCReturn(rc, rc);
2641 Log2(("VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX64\n", u64GuestCR0));
2642
2643 /* Setup VT-x's view of the guest CR0. */
2644 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
2645 if (pVM->hm.s.fNestedPaging)
2646 {
2647 if (CPUMIsGuestPagingEnabledEx(pCtx))
2648 {
2649 /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
2650 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
2651 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT);
2652 }
2653 else
2654 {
2655 /* The guest doesn't have paging enabled, make CR3 access to cause VM exits to update our shadow. */
2656 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
2657 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
2658 }
2659
2660 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
2661 AssertRCReturn(rc, rc);
2662 }
2663 else
2664 u64GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a VM-exit. */
2665
2666 /*
2667 * Guest FPU bits.
2668 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be set on the first
2669 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
2670 * CPUs to support VT-x (probably meaning up to Nehalem) and makes no exception for UX in the VM-entry checks.
2671 u64GuestCR0 |= X86_CR0_NE;
2672 bool fInterceptNM = false;
2673 if (CPUMIsGuestFPUStateActive(pVCpu))
2674 {
2675 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
2676 /* The guest should still get #NM exceptions when it expects them, so we should not clear the TS & MP bits here.
2677 We're only concerned about -us- not intercepting #NMs when the guest FPU state is active. Not the guest itself! */
2678 }
2679 else
2680 {
2681 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
2682 u64GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
2683 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
2684 }
2685
2686 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
2687 bool fInterceptMF = false;
2688 if (!(pCtx->cr0 & X86_CR0_NE))
2689 fInterceptMF = true;
2690
2691 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
2692 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2693 {
2694 Assert(PDMVmmDevHeapIsEnabled(pVM));
2695 Assert(pVM->hm.s.vmx.pRealModeTSS);
2696 pVCpu->hm.s.vmx.u32XcptBitmap |= VMX_REAL_MODE_XCPT_BITMAP;
2697 fInterceptNM = true;
2698 fInterceptMF = true;
2699 }
2700 else
2701 pVCpu->hm.s.vmx.u32XcptBitmap &= ~VMX_REAL_MODE_XCPT_BITMAP;
2702
2703 if (fInterceptNM)
2704 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
2705 else
2706 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
2707
2708 if (fInterceptMF)
2709 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
2710 else
2711 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
2712
2713 /* Additional intercepts for debugging, define these yourself explicitly. */
2714#ifdef VBOX_ALWAYS_TRAP_ALL_EXCEPTIONS
2715 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_BP)
2716 | RT_BIT(X86_XCPT_DB)
2717 | RT_BIT(X86_XCPT_DE)
2718 | RT_BIT(X86_XCPT_NM)
2719 | RT_BIT(X86_XCPT_UD)
2720 | RT_BIT(X86_XCPT_NP)
2721 | RT_BIT(X86_XCPT_SS)
2722 | RT_BIT(X86_XCPT_GP)
2723 | RT_BIT(X86_XCPT_PF)
2724 | RT_BIT(X86_XCPT_MF);
2725#elif defined(VBOX_ALWAYS_TRAP_PF)
2726 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2727#endif
2728
2729 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
2730
2731 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
2732 uint64_t uSetCR0 = (pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
2733 uint64_t uZapCR0 = (pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
2734 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
2735 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
2736 else
2737 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
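        /*
         * Illustration (typical but hypothetical MSR values): vmx_cr0_fixed0 = 0x80000021 (PG, NE, PE)
         * and vmx_cr0_fixed1 = 0xffffffff give uSetCR0 = 0x80000021 (bits that must be 1) and
         * uZapCR0 = 0xffffffff (bits that may be 1). With an unrestricted guest, PE and PG are dropped
         * from uSetCR0 above so the guest can run real-mode and non-paged protected-mode code.
         */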
2738
2739 u64GuestCR0 |= uSetCR0;
2740 u64GuestCR0 &= uZapCR0;
2741 u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
2742
2743 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
2744 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR0, u64GuestCR0);
2745 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
2746 Log2(("VMX_VMCS_GUEST_CR0=%#RX32\n", (uint32_t)u64GuestCR0));
2747
2748 /*
2749 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
2750 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
2751 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
2752 */
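        /*
         * Reminder of the guest/host mask semantics (Intel spec): for every bit set in the CR0 mask the
         * guest reads the value from the CR0 read shadow written above, and a MOV to CR0 that attempts
         * to give such a bit a value different from the shadow causes a VM-exit; bits clear in the mask
         * are owned by the guest and accessed directly.
         */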
2753 uint64_t u64CR0Mask = X86_CR0_PE
2755 | X86_CR0_WP
2756 | X86_CR0_PG
2757 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
2758 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
2759 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
2760
2761 /* We don't need to intercept changes to CR0.PE with unrestricted guests. */
2762 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2763 u64CR0Mask &= ~X86_CR0_PE;
2764
2765 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
2766 if (fInterceptNM)
2767 u64CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
2768 else
2769 u64CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
2770
2771 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
2772 pVCpu->hm.s.vmx.cr0_mask = u64CR0Mask;
2773 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64CR0Mask);
2774 AssertRCReturn(rc, rc);
2775
2776 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
2777 }
2778
2779 /*
2780 * Guest CR2.
2781 * It's always loaded in the assembler code. Nothing to do here.
2782 */
2783
2784 /*
2785 * Guest CR3.
2786 */
2787 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
2788 {
2789 uint64_t u64GuestCR3 = 0;
2790 if (pVM->hm.s.fNestedPaging)
2791 {
2792 pVCpu->hm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVCpu);
2793
2794 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2795 Assert(pVCpu->hm.s.vmx.GCPhysEPTP);
2796 Assert(!(pVCpu->hm.s.vmx.GCPhysEPTP & 0xfff0000000000000ULL));
2797 Assert(!(pVCpu->hm.s.vmx.GCPhysEPTP & 0xfff));
2798
2799 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
2800 pVCpu->hm.s.vmx.GCPhysEPTP |= VMX_EPT_MEMTYPE_WB
2801 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
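            /*
             * Illustration (assuming the usual encodings: WB memory type = 6 in bits 2:0, page-walk
             * length field = 3 in bits 5:3): a 4K-aligned PML4 table at 0x12345000 yields an EPTP of
             * 0x1234501e.
             */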
2802
2803 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2804 AssertMsg( ((pVCpu->hm.s.vmx.GCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2805 && ((pVCpu->hm.s.vmx.GCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
2806 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.GCPhysEPTP));
2807
2808 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.GCPhysEPTP);
2809 AssertRCReturn(rc, rc);
2810 Log(("VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.GCPhysEPTP));
2811
2812 if ( pVM->hm.s.vmx.fUnrestrictedGuest
2813 || CPUMIsGuestPagingEnabledEx(pCtx))
2814 {
2815 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2816 if (CPUMIsGuestInPAEModeEx(pCtx))
2817 {
2818 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
2819 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
2820 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
2821 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
2822 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
2823 AssertRCReturn(rc, rc);
2824 }
2825
2826 /* With nested paging the guest's view of its CR3 is unblemished: either the guest is using paging, or we
2827 have unrestricted execution to handle the case where it isn't. */
2828 u64GuestCR3 = pCtx->cr3;
2829 }
2830 else
2831 {
2832 /*
2833 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
2834 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
2835 * EPT takes care of translating it to host-physical addresses.
2836 */
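                /* In other words (added): guest-linear -> identity page table -> guest-physical -> EPT -> host-physical.
                   The CR3 we hand to VT-x below therefore points at our identity-mapped page table on the VMMDev heap
                   rather than at any guest-owned structure. */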
2837 RTGCPHYS GCPhys;
2838 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2839 Assert(PDMVmmDevHeapIsEnabled(pVM));
2840
2841 /* We obtain it here every time as the guest could have relocated this PCI region. */
2842 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2843 AssertRCReturn(rc, rc);
2844
2845 u64GuestCR3 = GCPhys;
2846 }
2847 }
2848 else
2849 {
2850 /* Non-nested paging case, just use the hypervisor's CR3. */
2851 u64GuestCR3 = PGMGetHyperCR3(pVCpu);
2852 }
2853
2854 Log2(("VMX_VMCS_GUEST_CR3=%#RX64\n", u64GuestCR3));
2855 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, u64GuestCR3);
2856 AssertRCReturn(rc, rc);
2857
2858 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
2859 }
2860
2861 /*
2862 * Guest CR4.
2863 */
2864 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
2865 {
2866 uint64_t u64GuestCR4 = pCtx->cr4;
2867
2868 /* The guest's view of its CR4 is unblemished. */
2869 rc = VMXWriteVmcsGstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, u64GuestCR4);
2870 AssertRCReturn(rc, rc);
2871 Log2(("VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RGv\n", u64GuestCR4));
2872
2873 /* Setup VT-x's view of the guest CR4. */
2874 /*
2875 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
2876 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2877 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2878 */
2879 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2880 {
2881 Assert(pVM->hm.s.vmx.pRealModeTSS);
2882 Assert(PDMVmmDevHeapIsEnabled(pVM));
2883 u64GuestCR4 &= ~X86_CR4_VME;
2884 }
2885
2886 if (pVM->hm.s.fNestedPaging)
2887 {
2888 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2889 && !pVM->hm.s.vmx.fUnrestrictedGuest)
2890 {
2891 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2892 u64GuestCR4 |= X86_CR4_PSE;
2893                /* Our identity mapping is a 32-bit page directory. */
2894 u64GuestCR4 &= ~X86_CR4_PAE;
2895 }
2896 /* else use guest CR4.*/
2897 }
2898 else
2899 {
2900 /*
2901             * The shadow paging mode and the guest paging mode can differ; the shadow mode follows the host paging
2902             * mode, so we need to adjust VT-x's view of CR4 according to our shadow page tables.
2903 */
2904 switch (pVCpu->hm.s.enmShadowMode)
2905 {
2906 case PGMMODE_REAL: /* Real-mode. */
2907 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2908 case PGMMODE_32_BIT: /* 32-bit paging. */
2909 {
2910 u64GuestCR4 &= ~X86_CR4_PAE;
2911 break;
2912 }
2913
2914 case PGMMODE_PAE: /* PAE paging. */
2915 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2916 {
2917 u64GuestCR4 |= X86_CR4_PAE;
2918 break;
2919 }
2920
2921 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2922 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2923#ifdef VBOX_ENABLE_64_BITS_GUESTS
2924 break;
2925#endif
2926 default:
2927 AssertFailed();
2928 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2929 }
2930 }
2931
2932 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
2933 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
2934 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
2935 u64GuestCR4 |= uSetCR4;
2936 u64GuestCR4 &= uZapCR4;
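        /*
         * Note (added): MSR IA32_VMX_CR4_FIXED0 has 1s for the CR4 bits that must be 1 in VMX operation and
         * IA32_VMX_CR4_FIXED1 has 0s for the bits that must be 0. Hence (fixed0 & fixed1) gives the bits we must force
         * set (typically just X86_CR4_VMXE) and (fixed0 | fixed1) gives the bits that are allowed to be 1.
         */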
2937
2938 /* Write VT-x's view of the guest CR4 into the VMCS. */
2939        Log2(("VMX_VMCS_GUEST_CR4=%#RGv (Set=%#RX64 Zap=%#RX64)\n", u64GuestCR4, uSetCR4, uZapCR4));
2940 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR4, u64GuestCR4);
2941
2942        /* Set up the CR4 mask. These CR4 bits are owned by the host; if the guest attempts to change them, a VM-exit occurs. */
2943 uint64_t u64CR4Mask = 0;
2944 u64CR4Mask = X86_CR4_VME
2945 | X86_CR4_PAE
2946 | X86_CR4_PGE
2947 | X86_CR4_PSE
2948 | X86_CR4_VMXE;
2949 pVCpu->hm.s.vmx.cr4_mask = u64CR4Mask;
2950 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64CR4Mask);
2951 AssertRCReturn(rc, rc);
2952
2953 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
2954 }
2955 return rc;
2956}
2957
2958
2959/**
2960 * Loads the guest debug registers into the guest-state area in the VMCS.
2961 * This also sets up whether #DB and MOV DRx accesses cause VM exits.
2962 *
2963 * @returns VBox status code.
2964 * @param pVM Pointer to the VM.
2965 * @param pVCpu Pointer to the VMCPU.
2966 * @param pCtx Pointer to the guest-CPU context.
2967 *
2968 * @remarks No-long-jump zone!!!
2969 */
2970DECLINLINE(int) hmR0VmxLoadGuestDebugRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2971{
2972 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
2973 return VINF_SUCCESS;
2974
2975#ifdef DEBUG
2976 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
2977 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
2978 {
2979 Assert((pCtx->dr[7] & 0xffffffff00000000ULL) == 0); /* upper 32 bits are reserved (MBZ). */
2980 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
2981 Assert((pCtx->dr[7] & 0xd800) == 0); /* bits 15, 14, 12, 11 are reserved (MBZ). */
2982 Assert((pCtx->dr[7] & 0x400) == 0x400); /* bit 10 is reserved (MB1). */
2983 }
2984#endif
2985
2986 int rc = VERR_INTERNAL_ERROR_5;
2987 bool fInterceptDB = false;
2988 bool fInterceptMovDRx = false;
2989 if (DBGFIsStepping(pVCpu))
2990 {
2991 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF. */
2992 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
2993 {
2994 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
2995 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
2996 AssertRCReturn(rc, rc);
2997 Assert(fInterceptDB == false);
2998 }
2999 else if (pCtx->eflags.Bits.u1TF) /* If the guest is using its TF bit, we cannot single step in DBGF. */
3000 {
3001 Assert(fInterceptDB == false);
3002 /** @todo can we somehow signal DBGF that it cannot single-step instead of
3003 * just continuing? */
3004 }
3005 else
3006 fInterceptDB = true;
3007 }
3008 else
3009 Assert(fInterceptDB == false); /* If we are not single stepping in DBGF, there is no need to intercept #DB. */
3010
3011 /*
3012     * If the guest is using its DRx registers and the host DRx registers do not yet contain the guest DRx values,
3013     * load the guest DRx registers into the host and don't cause VM-exits on the guest's MOV DRx accesses.
3014     * The same applies to the hypervisor DRx registers; the guest takes priority here.
3015 */
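    /*
     * Decision summary (added): if the guest's DR7 is armed, the guest debug state is loaded and MOV DRx is not
     * intercepted; otherwise, if the hypervisor's DR7 is armed, the hypervisor debug state is loaded and MOV DRx is
     * intercepted; otherwise neither debug state is activated and MOV DRx is not intercepted.
     */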
3016 if ( (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3017 && !CPUMIsGuestDebugStateActive(pVCpu))
3018 {
3019 /* Save the host and load the guest debug registers. This will make the guest debug state active. */
3020 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
3021 AssertRC(rc);
3022 Assert(CPUMIsGuestDebugStateActive(pVCpu));
3023 Assert(fInterceptMovDRx == false);
3024 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
3025 }
3026 else if ( CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD)
3027 && !CPUMIsHyperDebugStateActive(pVCpu))
3028 {
3029 /* Save the host and load the hypervisor debug registers. This will make the hyper debug state active. */
3030 rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
3031 AssertRC(rc);
3032 Assert(CPUMIsHyperDebugStateActive(pVCpu));
3033 fInterceptMovDRx = true;
3034 }
3035 else
3036        Assert(fInterceptMovDRx == false); /* No need to intercept MOV DRx if neither DBGF nor the guest is using the debug registers. */
3037
3038 /* Update the exception bitmap regarding intercepting #DB generated by the guest. */
3039 if (fInterceptDB)
3040 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
3041 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3042 {
3043#ifndef VBOX_ALWAYS_TRAP_ALL_EXCEPTIONS
3044 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
3045#endif
3046 }
3047
3048 /* Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions. */
3049 if (fInterceptMovDRx)
3050 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
3051 else
3052 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
3053
3054 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3055 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
3056
3057 /* The guest's view of its DR7 is unblemished. */
3058 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
3059
3060 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
3061 return rc;
3062}
3063
3064
3065#ifdef DEBUG
3066/**
3067 * Debug function to validate segment registers.
3068 */
3069static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3070{
3071 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3072 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
3073 && ( !CPUMIsGuestInRealModeEx(pCtx)
3074 && !CPUMIsGuestInV86ModeEx(pCtx)))
3075 {
3076 /* Protected mode checks */
3077 /* CS */
3078 Assert(pCtx->cs.Attr.n.u1Present);
3079 Assert(!(pCtx->cs.Attr.u & 0xf00));
3080 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
3081 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
3082 || !(pCtx->cs.Attr.n.u1Granularity));
3083 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
3084 || (pCtx->cs.Attr.n.u1Granularity));
3085 Assert(pCtx->cs.Attr.u && pCtx->cs.Attr.u != VMX_SEL_UNUSABLE); /* CS cannot be loaded with NULL in protected mode. */
3086 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
3087 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
3088 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
3089 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
3090 else
3091            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
3092 /* SS */
3093 if (pCtx->ss.Attr.u && pCtx->ss.Attr.u != VMX_SEL_UNUSABLE)
3094 {
3095 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3096 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
3097 Assert(pCtx->ss.Attr.n.u1Present);
3098 Assert(!(pCtx->ss.Attr.u & 0xf00));
3099 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
3100 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
3101 || !(pCtx->ss.Attr.n.u1Granularity));
3102 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
3103 || (pCtx->ss.Attr.n.u1Granularity));
3104 }
3105 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
3106 /* CR0 might not be up-to-date here always, hence disabled. */
3107#if 0
3108        if (!(pCtx->cr0 & X86_CR0_PE))
3109 Assert(!pCtx->ss.Attr.n.u2Dpl);
3110#endif
3111 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
3112 if (pCtx->ds.Attr.u && pCtx->ds.Attr.u != VMX_SEL_UNUSABLE)
3113 {
3114 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3115 Assert(pCtx->ds.Attr.n.u1Present);
3116 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
3117 Assert(!(pCtx->ds.Attr.u & 0xf00));
3118 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
3119 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
3120 || !(pCtx->ds.Attr.n.u1Granularity));
3121 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
3122 || (pCtx->ds.Attr.n.u1Granularity));
3123 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3124 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
3125 }
3126 if (pCtx->es.Attr.u && pCtx->es.Attr.u != VMX_SEL_UNUSABLE)
3127 {
3128 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3129 Assert(pCtx->es.Attr.n.u1Present);
3130 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
3131 Assert(!(pCtx->es.Attr.u & 0xf00));
3132 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
3133 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
3134 || !(pCtx->es.Attr.n.u1Granularity));
3135 Assert( !(pCtx->es.u32Limit & 0xfff00000)
3136 || (pCtx->es.Attr.n.u1Granularity));
3137 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3138 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
3139 }
3140 if (pCtx->fs.Attr.u && pCtx->fs.Attr.u != VMX_SEL_UNUSABLE)
3141 {
3142 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3143 Assert(pCtx->fs.Attr.n.u1Present);
3144 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
3145 Assert(!(pCtx->fs.Attr.u & 0xf00));
3146 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
3147 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
3148 || !(pCtx->fs.Attr.n.u1Granularity));
3149 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
3150 || (pCtx->fs.Attr.n.u1Granularity));
3151 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3152 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3153 }
3154 if (pCtx->gs.Attr.u && pCtx->gs.Attr.u != VMX_SEL_UNUSABLE)
3155 {
3156 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3157 Assert(pCtx->gs.Attr.n.u1Present);
3158 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
3159 Assert(!(pCtx->gs.Attr.u & 0xf00));
3160 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
3161 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
3162 || !(pCtx->gs.Attr.n.u1Granularity));
3163 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
3164 || (pCtx->gs.Attr.n.u1Granularity));
3165 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3166 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3167 }
3168 /* 64-bit capable CPUs. */
3169# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3170 Assert(!(pCtx->cs.u64Base >> 32));
3171 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
3172 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
3173 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
3174# endif
3175 }
3176 else if ( CPUMIsGuestInV86ModeEx(pCtx)
3177 || ( CPUMIsGuestInRealModeEx(pCtx)
3178 && !pVM->hm.s.vmx.fUnrestrictedGuest))
3179 {
3180 /* Real and v86 mode checks. */
3181        /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS. We want to check what we're actually feeding to VT-x. */
3182 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
3183 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3184 {
3185 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
3186 }
3187 else
3188 {
3189 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
3190 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
3191 }
3192
3193 /* CS */
3194        AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
3195 Assert(pCtx->cs.u32Limit == 0xffff);
3196 Assert(u32CSAttr == 0xf3);
3197 /* SS */
3198 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
3199 Assert(pCtx->ss.u32Limit == 0xffff);
3200 Assert(u32SSAttr == 0xf3);
3201 /* DS */
3202 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
3203 Assert(pCtx->ds.u32Limit == 0xffff);
3204 Assert(u32DSAttr == 0xf3);
3205 /* ES */
3206 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
3207 Assert(pCtx->es.u32Limit == 0xffff);
3208 Assert(u32ESAttr == 0xf3);
3209 /* FS */
3210 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
3211 Assert(pCtx->fs.u32Limit == 0xffff);
3212 Assert(u32FSAttr == 0xf3);
3213 /* GS */
3214 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
3215 Assert(pCtx->gs.u32Limit == 0xffff);
3216 Assert(u32GSAttr == 0xf3);
3217 /* 64-bit capable CPUs. */
3218# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3219 Assert(!(pCtx->cs.u64Base >> 32));
3220 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
3221 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
3222 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
3223# endif
3224 }
3225}
3226#endif /* DEBUG */
3227
3228
3229/**
3230 * Writes a guest segment register into the guest-state area in the VMCS.
3231 *
3232 * @returns VBox status code.
3233 * @param pVM Pointer to the VM.
3234 * @param pVCpu Pointer to the VMCPU.
3235 * @param idxSel Index of the selector in the VMCS.
3236 * @param idxLimit Index of the segment limit in the VMCS.
3237 * @param idxBase Index of the segment base in the VMCS.
3238 * @param idxAccess Index of the access rights of the segment in the VMCS.
3239 * @param pSelReg Pointer to the segment selector.
3240 * @param pCtx Pointer to the guest-CPU context.
3241 *
3242 * @remarks No-long-jump zone!!!
3243 */
3244DECLINLINE(int) hmR0VmxWriteSegmentReg(PVM pVM, PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
3245 uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
3246{
3247 int rc;
3248 rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
3249 rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
3250 rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
3251 AssertRCReturn(rc, rc);
3252
3253 uint32_t u32Access = pSelReg->Attr.u;
3254 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3255 {
3256 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
3257 u32Access = 0xf3;
3258 Assert(pVM->hm.s.vmx.pRealModeTSS);
3259 Assert(PDMVmmDevHeapIsEnabled(pVM));
3260 }
3261 else
3262 {
3263 /*
3264         * The way to differentiate whether this is really a null selector, or just a selector that was loaded with 0 while
3265         * in real-mode, is by using the segment attributes. A selector loaded in real-mode with the value 0 remains valid
3266         * and usable in protected-mode and we should -not- mark it as an unusable segment. Both the recompiler and VT-x
3267         * ensure that NULL selectors loaded in protected-mode have their attributes set to 0.
3268 */
3269 if (!u32Access)
3270 u32Access = VMX_SEL_UNUSABLE;
3271 }
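    /* For reference (added): the attribute value 0xf3 used by the real-on-v86 hack decodes as type=3 (read/write,
       accessed data segment), S=1, DPL=3 and P=1; the unusable flag itself lives in bit 16 of the access-rights field. */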
3272
3273 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
3274 AssertMsg((u32Access == VMX_SEL_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
3275              ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg->Sel, pSelReg->Attr.u));
3276
3277 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
3278 AssertRCReturn(rc, rc);
3279 return rc;
3280}
3281
3282
3283/**
3284 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
3285 * into the guest-state area in the VMCS.
3286 *
3287 * @returns VBox status code.
3288 * @param pVM Pointer to the VM.
3289 * @param   pVCpu       Pointer to the VMCPU.
3290 * @param pCtx Pointer to the guest-CPU context.
3291 *
3292 * @remarks No-long-jump zone!!!
3293 * @remarks Requires RFLAGS (for debug assertions).
3294 */
3295DECLINLINE(int) hmR0VmxLoadGuestSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3296{
3297 int rc = VERR_INTERNAL_ERROR_5;
3298
3299 /*
3300 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
3301 */
3302 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
3303 {
3304 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
3305 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3306 {
3307 pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pCtx->cs.Attr.u;
3308 pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pCtx->ss.Attr.u;
3309 pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pCtx->ds.Attr.u;
3310 pVCpu->hm.s.vmx.RealMode.uAttrES.u = pCtx->es.Attr.u;
3311 pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pCtx->fs.Attr.u;
3312 pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pCtx->gs.Attr.u;
3313 }
3314
3315#ifdef VBOX_WITH_REM
3316 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
3317 {
3318 Assert(pVM->hm.s.vmx.pRealModeTSS);
3319 PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
3320 if (pVCpu->hm.s.vmx.enmLastSeenGuestMode != enmGuestMode)
3321 {
3322 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
3323 if ( pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
3324 && enmGuestMode >= PGMMODE_PROTECTED)
3325 {
3326 /* Signal that recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
3327 in real-mode (e.g. OpenBSD 4.0) */
3328 REMFlushTBs(pVM);
3329 Log2(("Switch to protected mode detected!\n"));
3330 }
3331 pVCpu->hm.s.vmx.enmLastSeenGuestMode = enmGuestMode;
3332 }
3333 }
3334#endif
3335 rc = hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
3336 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pCtx->cs, pCtx);
3337 rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
3338 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pCtx->ss, pCtx);
3339 rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
3340 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pCtx->ds, pCtx);
3341 rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
3342 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pCtx->es, pCtx);
3343 rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
3344 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pCtx->fs, pCtx);
3345 rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
3346 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pCtx->gs, pCtx);
3347 AssertRCReturn(rc, rc);
3348
3349#ifdef DEBUG
3350 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pCtx);
3351#endif
3352 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
3353 }
3354
3355 /*
3356 * Guest TR.
3357 */
3358 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
3359 {
3360 /*
3361 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
3362 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
3363 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
3364 */
3365 uint16_t u16Sel = 0;
3366 uint32_t u32Limit = 0;
3367 uint64_t u64Base = 0;
3368 uint32_t u32AccessRights = 0;
3369
3370 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3371 {
3372 u16Sel = pCtx->tr.Sel;
3373 u32Limit = pCtx->tr.u32Limit;
3374 u64Base = pCtx->tr.u64Base;
3375 u32AccessRights = pCtx->tr.Attr.u;
3376 }
3377 else
3378 {
3379 Assert(pVM->hm.s.vmx.pRealModeTSS);
3380 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
3381
3382 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
3383 RTGCPHYS GCPhys;
3384 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
3385 AssertRCReturn(rc, rc);
3386
3387 X86DESCATTR DescAttr;
3388 DescAttr.u = 0;
3389 DescAttr.n.u1Present = 1;
3390 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
3391
3392 u16Sel = 0;
3393 u32Limit = HM_VTX_TSS_SIZE;
3394 u64Base = GCPhys; /* in real-mode phys = virt. */
3395 u32AccessRights = DescAttr.u;
3396 }
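        /* Note (added): the attributes constructed above encode a present, busy 32-bit TSS (type 11), i.e. an
           access-rights value of 0x8b, with the base set to the guest-physical address of pRealModeTSS. */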
3397
3398 /* Validate. */
3399 Assert(!(u16Sel & RT_BIT(2)));
3400 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
3401 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
3402 AssertMsg(!(u32AccessRights & VMX_SEL_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
3403 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
3404 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
3405 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
3406 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
3407 Assert( (u32Limit & 0xfff) == 0xfff
3408 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
3409 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
3410 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
3411
3412 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel);
3413 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
3414 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
3415 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
3416 AssertRCReturn(rc, rc);
3417
3418 Log2(("VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
3419 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
3420 }
3421
3422 /*
3423 * Guest GDTR.
3424 */
3425 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
3426 {
3427 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
3428 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt);
3429 AssertRCReturn(rc, rc);
3430
3431 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000ULL)); /* Bits 31:16 MBZ. */
3432 Log2(("VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pCtx->gdtr.pGdt));
3433 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
3434 }
3435
3436 /*
3437 * Guest LDTR.
3438 */
3439 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
3440 {
3441 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
3442 uint32_t u32Access = 0;
3443        if (!pCtx->ldtr.Attr.u)
3444 u32Access = VMX_SEL_UNUSABLE;
3445 else
3446 u32Access = pCtx->ldtr.Attr.u;
3447
3448 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pCtx->ldtr.Sel);
3449 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit);
3450 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base);
3451 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
3452 AssertRCReturn(rc, rc);
3453
3454 /* Validate. */
3455 if (!(u32Access & VMX_SEL_UNUSABLE))
3456 {
3457 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
3458 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
3459 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
3460 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
3461 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
3462 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
3463 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
3464 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
3465 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
3466 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
3467 }
3468
3469 Log2(("VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pCtx->ldtr.u64Base));
3470 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
3471 }
3472
3473 /*
3474 * Guest IDTR.
3475 */
3476 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
3477 {
3478 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
3479 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt);
3480 AssertRCReturn(rc, rc);
3481
3482 Assert(!(pCtx->idtr.cbIdt & 0xffff0000ULL)); /* Bits 31:16 MBZ. */
3483 Log2(("VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pCtx->idtr.pIdt));
3484 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
3485 }
3486
3487 /*
3488 * Guest FS & GS base MSRs.
3489 * We already initialized the FS & GS base as part of the guest segment registers, but the guest's FS/GS base
3490 * MSRs might have changed (e.g. due to WRMSR) and we need to update the bases if that happened. These MSRs
3491 * are only available in 64-bit mode.
3492 */
3493 /** @todo Avoid duplication of this code in assembly (see MYPUSHSEGS) - it
3494 * should not be necessary to do it in assembly again. */
3495 if (CPUMIsGuestInLongModeEx(pCtx))
3496 {
3497 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_FS_BASE_MSR)
3498 {
3499 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_FS_BASE, pCtx->fs.u64Base);
3500 AssertRCReturn(rc, rc);
3501 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_FS_BASE_MSR;
3502 }
3503
3504 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GS_BASE_MSR)
3505 {
3506 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GS_BASE, pCtx->gs.u64Base);
3507 AssertRCReturn(rc, rc);
3508 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GS_BASE_MSR;
3509 }
3510 }
3511 else
3512 pVCpu->hm.s.fContextUseFlags &= ~(HM_CHANGED_GUEST_FS_BASE_MSR | HM_CHANGED_GUEST_GS_BASE_MSR);
3513
3514 return VINF_SUCCESS;
3515}
3516
3517
3518/**
3519 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
3520 * areas. These MSRs are automatically loaded onto the CPU (with the guest values) on every
3521 * successful VM-entry and stored back from the CPU on every successful VM-exit.
3522 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
3523 *
3524 * @returns VBox status code.
3525 * @param pVM Pointer to the VM.
3526 * @param pVCpu Pointer to the VMCPU.
3527 * @param pCtx Pointer to the guest-CPU context.
3528 *
3529 * @remarks No-long-jump zone!!!
3530 */
3531DECLINLINE(int) hmR0VmxLoadGuestMsrs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3532{
3533 AssertPtr(pVCpu);
3534 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
3535
3536 /*
3537 * MSRs covered by Auto-load/store: EFER, LSTAR, STAR, SF_MASK, TSC_AUX (RDTSCP).
3538 */
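    /*
     * Layout note (added): each entry in the auto-load/store area is 16 bytes: a 32-bit MSR index, 32 reserved bits
     * and the 64-bit MSR value, matching the VMXMSR fields filled in below.
     */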
3539 int rc = VINF_SUCCESS;
3540 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
3541 {
3542 PVMXMSR pGuestMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
3543 unsigned cGuestMsrs = 0;
3544
3545 /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
3546 const bool fSupportsNX = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
3547 const bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
3548 if (fSupportsNX || fSupportsLongMode)
3549 {
3550 /** @todo support save IA32_EFER, i.e.
3551 * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR, in which case the
3552 * guest EFER need not be part of the VM-entry MSR-load area. */
3553 pGuestMsr->u32IndexMSR = MSR_K6_EFER;
3554 pGuestMsr->u32Reserved = 0;
3555 pGuestMsr->u64Value = pCtx->msrEFER;
3556 /* VT-x will complain if only MSR_K6_EFER_LME is set. See Intel spec. 26.4 "Loading MSRs" for details. */
3557 if (!CPUMIsGuestInLongModeEx(pCtx))
3558 pGuestMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
3559 pGuestMsr++; cGuestMsrs++;
3560 if (fSupportsLongMode)
3561 {
3562 pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
3563 pGuestMsr->u32Reserved = 0;
3564 pGuestMsr->u64Value = pCtx->msrLSTAR; /* 64 bits mode syscall rip */
3565 pGuestMsr++; cGuestMsrs++;
3566 pGuestMsr->u32IndexMSR = MSR_K6_STAR;
3567 pGuestMsr->u32Reserved = 0;
3568 pGuestMsr->u64Value = pCtx->msrSTAR; /* legacy syscall eip, cs & ss */
3569 pGuestMsr++; cGuestMsrs++;
3570 pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
3571 pGuestMsr->u32Reserved = 0;
3572 pGuestMsr->u64Value = pCtx->msrSFMASK; /* syscall flag mask */
3573 pGuestMsr++; cGuestMsrs++;
3574 /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
3575#if 0
3576 pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
3577 pGuestMsr->u32Reserved = 0;
3578 pGuestMsr->u64Value = pCtx->msrKERNELGSBASE; /* swapgs exchange value */
3579 pGuestMsr++; cGuestMsrs++;
3580#endif
3581 }
3582 }
3583
3584 /*
3585 * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to
3586 * load the guest's copy if the guest can execute RDTSCP without causing VM-exits.
3587 */
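    /* Note (added): when the guest cannot use RDTSCP without a VM-exit, TSC_AUX is simply left out of the area and
       the MSR keeps its host value while the guest runs. */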
3588 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)
3589 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP))
3590 {
3591 pGuestMsr->u32IndexMSR = MSR_K8_TSC_AUX;
3592 pGuestMsr->u32Reserved = 0;
3593 rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);
3594 AssertRCReturn(rc, rc);
3595 pGuestMsr++; cGuestMsrs++;
3596 }
3597
3598        /* Shouldn't ever happen, but there -is- a hardware limit on the number of auto-load/store MSRs. We're well within the recommended 512. */
3599 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))
3600 {
3601 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
3602 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3603 }
3604
3605 /* Update the VCPU's copy of the guest MSR count. */
3606 pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
3607 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs);
3608 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);
3609 AssertRCReturn(rc, rc);
3610
3611 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
3612 }
3613
3614 /*
3615 * Guest Sysenter MSRs.
3616     * These flags are only set when MSR-bitmaps are not supported by the CPU, in which case WRMSR
3617     * to these MSRs causes VM-exits.
3618 */
3619 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
3620 {
3621 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
3622 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
3623 }
3624 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
3625 {
3626 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
3627 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
3628 }
3629 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
3630 {
3631 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
3632 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
3633 }
3634 AssertRCReturn(rc, rc);
3635
3636 return rc;
3637}
3638
3639
3640/**
3641 * Loads the guest activity state into the guest-state area in the VMCS.
3642 *
3643 * @returns VBox status code.
3644 * @param pVM Pointer to the VM.
3645 * @param pVCpu Pointer to the VMCPU.
3646 * @param pCtx Pointer to the guest-CPU context.
3647 *
3648 * @remarks No-long-jump zone!!!
3649 */
3650DECLINLINE(int) hmR0VmxLoadGuestActivityState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3651{
3652 /** @todo See if we can make use of other states, e.g.
3653 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
3654 int rc = VINF_SUCCESS;
3655 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
3656 {
3657 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
3658 AssertRCReturn(rc, rc);
3659 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
3660 }
3661 return rc;
3662}
3663
3664
3665/**
3666 * Sets up the appropriate function to run guest code.
3667 *
3668 * @returns VBox status code.
3669 * @param pVM Pointer to the VM.
3670 * @param pVCpu Pointer to the VMCPU.
3671 * @param pCtx Pointer to the guest-CPU context.
3672 *
3673 * @remarks No-long-jump zone!!!
3674 */
3675DECLINLINE(int) hmR0VmxSetupVMRunHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3676{
3677 if (CPUMIsGuestInLongModeEx(pCtx))
3678 {
3679#ifndef VBOX_ENABLE_64_BITS_GUESTS
3680 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3681#endif
3682 Assert(pVM->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
3683#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3684 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
3685 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
3686#else
3687 /* 64-bit host or hybrid host. */
3688 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
3689#endif
3690 }
3691 else
3692 {
3693 /* Guest is not in long mode, use the 32-bit handler. */
3694 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
3695 }
3696 Assert(pVCpu->hm.s.vmx.pfnStartVM);
3697 return VINF_SUCCESS;
3698}
3699
3700
3701/**
3702 * Wrapper for running the guest code in VT-x.
3703 *
3704 * @returns VBox strict status code.
3705 * @param pVM Pointer to the VM.
3706 * @param pVCpu Pointer to the VMCPU.
3707 * @param pCtx Pointer to the guest-CPU context.
3708 *
3709 * @remarks No-long-jump zone!!!
3710 */
3711DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3712{
3713 /*
3714 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
3715     * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
3716     * Refer to the MSDN docs "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
3717 */
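    /* I.e. (added): hmR0VMXStartVMWrapXMM is expected to save the callee-saved XMM registers around the call to
       pfnStartVM and restore them afterwards, since the world switch itself does not preserve them. */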
3718#ifdef VBOX_WITH_KERNEL_USING_XMM
3719 return hmR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
3720#else
3721 return pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
3722#endif
3723}
3724
3725
3726/**
3727 * Reports a world-switch error and dumps some useful debug info.
3728 *
3729 * @param pVM Pointer to the VM.
3730 * @param pVCpu Pointer to the VMCPU.
3731 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
3732 * @param pCtx Pointer to the guest-CPU context.
3733 * @param pVmxTransient Pointer to the VMX transient structure (only
3734 * exitReason updated).
3735 */
3736static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
3737{
3738 Assert(pVM);
3739 Assert(pVCpu);
3740 Assert(pCtx);
3741 Assert(pVmxTransient);
3742 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3743
3744 Log(("VM-entry failure: %Rrc\n", rcVMRun));
3745 switch (rcVMRun)
3746 {
3747 case VERR_VMX_INVALID_VMXON_PTR:
3748 AssertFailed();
3749 break;
3750 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
3751 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
3752 {
3753 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.lasterror.u32ExitReason);
3754 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
3755 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
3756 AssertRC(rc);
3757
3758#ifdef VBOX_STRICT
3759 Log(("uExitReason %#x (VmxTransient %#x)\n", pVCpu->hm.s.vmx.lasterror.u32ExitReason,
3760 pVmxTransient->uExitReason));
3761 Log(("Exit Qualification %#x\n", pVmxTransient->uExitQualification));
3762 Log(("InstrError %#x\n", pVCpu->hm.s.vmx.lasterror.u32InstrError));
3763 if (pVCpu->hm.s.vmx.lasterror.u32InstrError <= VMX_INSTR_ERROR_MAX)
3764 Log(("InstrError Desc. \"%s\"\n", s_apszVmxInstrErrors[pVCpu->hm.s.vmx.lasterror.u32InstrError]));
3765 else
3766 Log(("InstrError Desc. Range exceeded %u\n", VMX_INSTR_ERROR_MAX));
3767
3768 /* VMX control bits. */
3769 uint32_t u32Val;
3770 uint64_t u64Val;
3771 RTHCUINTREG uHCReg;
3772 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, &u32Val); AssertRC(rc);
3773 Log(("VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS %#RX32\n", u32Val));
3774 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, &u32Val); AssertRC(rc);
3775 Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS %#RX32\n", u32Val));
3776 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, &u32Val); AssertRC(rc);
3777 Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2 %#RX32\n", u32Val));
3778 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, &u32Val); AssertRC(rc);
3779 Log(("VMX_VMCS32_CTRL_ENTRY_CONTROLS %#RX32\n", u32Val));
3780 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_CONTROLS, &u32Val); AssertRC(rc);
3781 Log(("VMX_VMCS32_CTRL_EXIT_CONTROLS %#RX32\n", u32Val));
3782 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
3783 Log(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
3784 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
3785 Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
3786 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
3787 Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
3788 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
3789 Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
3790 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
3791 Log(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
3792 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
3793 Log(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
3794 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
3795 Log(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
3796 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
3797 Log(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
3798 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
3799 Log(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
3800 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
3801 Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
3802 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
3803 Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
3804 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
3805 Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
3806 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
3807            Log(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
3808 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
3809 Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
3810 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
3811 Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
3812 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
3813 Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
3814
3815 /* Guest bits. */
3816 RTGCUINTREG uGCReg;
3817 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uGCReg); AssertRC(rc);
3818 Log(("Old Guest Rip %#RGv New %#RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)uGCReg));
3819 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uGCReg); AssertRC(rc);
3820 Log(("Old Guest Rsp %#RGv New %#RGv\n", (RTGCPTR)pCtx->rsp, (RTGCPTR)uGCReg));
3821 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RFLAGS, &uGCReg); AssertRC(rc);
3822 Log(("Old Guest Rflags %#RGr New %#RGr\n", (RTGCPTR)pCtx->rflags.u64, (RTGCPTR)uGCReg));
3823 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
3824 Log(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
3825
3826 /* Host bits. */
3827 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
3828 Log(("Host CR0 %#RHr\n", uHCReg));
3829 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
3830 Log(("Host CR3 %#RHr\n", uHCReg));
3831 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
3832 Log(("Host CR4 %#RHr\n", uHCReg));
3833
3834 RTGDTR HostGdtr;
3835 PCX86DESCHC pDesc;
3836 ASMGetGDTR(&HostGdtr);
3837 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val);
3838 Log(("Host CS %#08x\n", u32Val));
3839 if (u32Val < HostGdtr.cbGdt)
3840 {
3841 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3842 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
3843 }
3844
3845 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
3846 Log(("Host DS %#08x\n", u32Val));
3847 if (u32Val < HostGdtr.cbGdt)
3848 {
3849 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3850 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
3851 }
3852
3853 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
3854 Log(("Host ES %#08x\n", u32Val));
3855 if (u32Val < HostGdtr.cbGdt)
3856 {
3857 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3858 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
3859 }
3860
3861 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
3862 Log(("Host FS %#08x\n", u32Val));
3863 if (u32Val < HostGdtr.cbGdt)
3864 {
3865 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3866 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
3867 }
3868
3869 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
3870 Log(("Host GS %#08x\n", u32Val));
3871 if (u32Val < HostGdtr.cbGdt)
3872 {
3873 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3874 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
3875 }
3876
3877 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
3878 Log(("Host SS %#08x\n", u32Val));
3879 if (u32Val < HostGdtr.cbGdt)
3880 {
3881 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3882 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
3883 }
3884
3885 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
3886 Log(("Host TR %#08x\n", u32Val));
3887 if (u32Val < HostGdtr.cbGdt)
3888 {
3889 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3890 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
3891 }
3892
3893 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
3894 Log(("Host TR Base %#RHv\n", uHCReg));
3895 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
3896 Log(("Host GDTR Base %#RHv\n", uHCReg));
3897 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
3898 Log(("Host IDTR Base %#RHv\n", uHCReg));
3899 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
3900 Log(("Host SYSENTER CS %#08x\n", u32Val));
3901 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
3902 Log(("Host SYSENTER EIP %#RHv\n", uHCReg));
3903 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
3904 Log(("Host SYSENTER ESP %#RHv\n", uHCReg));
3905 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
3906 Log(("Host RSP %#RHv\n", uHCReg));
3907 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
3908 Log(("Host RIP %#RHv\n", uHCReg));
3909# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3910 if (VMX_IS_64BIT_HOST_MODE())
3911 {
3912 Log(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
3913 Log(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
3914 Log(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
3915 Log(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
3916 Log(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
3917 Log(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
3918 }
3919# endif
3920#endif /* VBOX_STRICT */
3921 break;
3922 }
3923
3924 default:
3925 /* Impossible */
3926 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
3927 break;
3928 }
3929 NOREF(pVM);
3930}
3931
3932
3933#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3934#ifndef VMX_USE_CACHED_VMCS_ACCESSES
3935# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
3936#endif
3937
3938#ifdef VBOX_STRICT
3939static bool hmR0VmxIsValidReadField(uint32_t idxField)
3940{
3941 switch (idxField)
3942 {
3943 case VMX_VMCS_GUEST_RIP:
3944 case VMX_VMCS_GUEST_RSP:
3945 case VMX_VMCS_GUEST_RFLAGS:
3946 case VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE:
3947 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
3948 case VMX_VMCS_GUEST_CR0:
3949 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
3950 case VMX_VMCS_GUEST_CR4:
3951 case VMX_VMCS_GUEST_DR7:
3952 case VMX_VMCS32_GUEST_SYSENTER_CS:
3953 case VMX_VMCS_GUEST_SYSENTER_EIP:
3954 case VMX_VMCS_GUEST_SYSENTER_ESP:
3955 case VMX_VMCS32_GUEST_GDTR_LIMIT:
3956 case VMX_VMCS_GUEST_GDTR_BASE:
3957 case VMX_VMCS32_GUEST_IDTR_LIMIT:
3958 case VMX_VMCS_GUEST_IDTR_BASE:
3959 case VMX_VMCS16_GUEST_FIELD_CS:
3960 case VMX_VMCS32_GUEST_CS_LIMIT:
3961 case VMX_VMCS_GUEST_CS_BASE:
3962 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
3963 case VMX_VMCS16_GUEST_FIELD_DS:
3964 case VMX_VMCS32_GUEST_DS_LIMIT:
3965 case VMX_VMCS_GUEST_DS_BASE:
3966 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
3967 case VMX_VMCS16_GUEST_FIELD_ES:
3968 case VMX_VMCS32_GUEST_ES_LIMIT:
3969 case VMX_VMCS_GUEST_ES_BASE:
3970 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
3971 case VMX_VMCS16_GUEST_FIELD_FS:
3972 case VMX_VMCS32_GUEST_FS_LIMIT:
3973 case VMX_VMCS_GUEST_FS_BASE:
3974 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
3975 case VMX_VMCS16_GUEST_FIELD_GS:
3976 case VMX_VMCS32_GUEST_GS_LIMIT:
3977 case VMX_VMCS_GUEST_GS_BASE:
3978 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
3979 case VMX_VMCS16_GUEST_FIELD_SS:
3980 case VMX_VMCS32_GUEST_SS_LIMIT:
3981 case VMX_VMCS_GUEST_SS_BASE:
3982 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
3983 case VMX_VMCS16_GUEST_FIELD_LDTR:
3984 case VMX_VMCS32_GUEST_LDTR_LIMIT:
3985 case VMX_VMCS_GUEST_LDTR_BASE:
3986 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
3987 case VMX_VMCS16_GUEST_FIELD_TR:
3988 case VMX_VMCS32_GUEST_TR_LIMIT:
3989 case VMX_VMCS_GUEST_TR_BASE:
3990 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
3991 case VMX_VMCS32_RO_EXIT_REASON:
3992 case VMX_VMCS32_RO_VM_INSTR_ERROR:
3993 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
3994 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
3995 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
3996 case VMX_VMCS32_RO_EXIT_INSTR_INFO:
3997 case VMX_VMCS_RO_EXIT_QUALIFICATION:
3998 case VMX_VMCS32_RO_IDT_INFO:
3999 case VMX_VMCS32_RO_IDT_ERROR_CODE:
4000 case VMX_VMCS_GUEST_CR3:
4001 case VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL:
4002 return true;
4003 }
4004 return false;
4005}
4006
4007static bool hmR0VmxIsValidWriteField(uint32_t idxField)
4008{
4009 switch (idxField)
4010 {
4011 case VMX_VMCS_GUEST_LDTR_BASE:
4012 case VMX_VMCS_GUEST_TR_BASE:
4013 case VMX_VMCS_GUEST_GDTR_BASE:
4014 case VMX_VMCS_GUEST_IDTR_BASE:
4015 case VMX_VMCS_GUEST_SYSENTER_EIP:
4016 case VMX_VMCS_GUEST_SYSENTER_ESP:
4017 case VMX_VMCS_GUEST_CR0:
4018 case VMX_VMCS_GUEST_CR4:
4019 case VMX_VMCS_GUEST_CR3:
4020 case VMX_VMCS_GUEST_DR7:
4021 case VMX_VMCS_GUEST_RIP:
4022 case VMX_VMCS_GUEST_RSP:
4023 case VMX_VMCS_GUEST_CS_BASE:
4024 case VMX_VMCS_GUEST_DS_BASE:
4025 case VMX_VMCS_GUEST_ES_BASE:
4026 case VMX_VMCS_GUEST_FS_BASE:
4027 case VMX_VMCS_GUEST_GS_BASE:
4028 case VMX_VMCS_GUEST_SS_BASE:
4029 return true;
4030 }
4031 return false;
4032}
4033#endif /* VBOX_STRICT */
4034
4035/**
4036 * Executes the specified handler in 64-bit mode.
4037 *
4038 * @returns VBox status code.
4039 * @param pVM Pointer to the VM.
4040 * @param pVCpu Pointer to the VMCPU.
4041 * @param pCtx Pointer to the guest CPU context.
4042 * @param pfnHandler Pointer to the RC handler function.
4043 * @param cbParam Number of parameters.
4044 * @param paParam Array of 32-bit parameters.
4045 */
4046VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
4047 uint32_t *paParam)
4048{
4049 int rc, rc2;
4050 PHMGLOBLCPUINFO pCpu;
4051 RTHCPHYS HCPhysCpuPage;
4052 RTHCUINTREG uOldEFlags;
4053
4054 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
4055 Assert(pfnHandler);
4056 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
4057 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
4058
4059#ifdef VBOX_STRICT
4060 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
4061 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
4062
4063    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
4064 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
4065#endif
4066
4067 /* Disable interrupts. */
4068 uOldEFlags = ASMIntDisableFlags();
4069
4070#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
4071 RTCPUID idHostCpu = RTMpCpuId();
4072 CPUMR0SetLApic(pVM, idHostCpu);
4073#endif
4074
4075 pCpu = HMR0GetCurrentCpu();
4076 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4077
4078    /* Clear the VMCS. This marks it inactive, clears implementation-specific data and writes the VMCS data back to memory. */
4079 VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4080
4081 /* Leave VMX Root Mode. */
4082 VMXDisable();
4083
4084 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4085
4086 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
4087 CPUMSetHyperEIP(pVCpu, pfnHandler);
4088 for (int i = (int)cbParam - 1; i >= 0; i--)
4089 CPUMPushHyper(pVCpu, paParam[i]);
4090
4091 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
4092
4093 /* Call the switcher. */
4094 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
4095 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
4096
4097 /** @todo replace with hmR0VmxEnterRootMode() and LeaveRootMode(). */
4098 /* Make sure the VMX instructions don't cause #UD faults. */
4099 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
4100
4101 /* Re-enter VMX Root Mode */
4102 rc2 = VMXEnable(HCPhysCpuPage);
4103 if (RT_FAILURE(rc2))
4104 {
4105 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4106 ASMSetFlags(uOldEFlags);
4107 return rc2;
4108 }
4109
4110 rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4111 AssertRC(rc2);
4112 Assert(!(ASMGetFlags() & X86_EFL_IF));
4113 ASMSetFlags(uOldEFlags);
4114 return rc;
4115}
4116
4117
4118/**
4119 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
4120 * supporting 64-bit guests.
4121 *
4122 * @returns VBox status code.
4123 * @param fResume Whether to VMLAUNCH or VMRESUME.
4124 * @param pCtx Pointer to the guest-CPU context.
4125 * @param pCache Pointer to the VMCS cache.
4126 * @param pVM Pointer to the VM.
4127 * @param pVCpu Pointer to the VMCPU.
4128 */
4129DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
4130{
4131 uint32_t aParam[6];
4132 PHMGLOBLCPUINFO pCpu = NULL;
4133 RTHCPHYS HCPhysCpuPage = 0;
4134 int rc = VERR_INTERNAL_ERROR_5;
4135
4136 pCpu = HMR0GetCurrentCpu();
4137 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4138
4139#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4140 pCache->uPos = 1;
4141 pCache->interPD = PGMGetInterPaeCR3(pVM);
4142 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
4143#endif
4144
4145#ifdef DEBUG
4146    pCache->TestIn.HCPhysCpuPage = 0;
4147 pCache->TestIn.HCPhysVmcs = 0;
4148 pCache->TestIn.pCache = 0;
4149 pCache->TestOut.HCPhysVmcs = 0;
4150 pCache->TestOut.pCache = 0;
4151 pCache->TestOut.pCtx = 0;
4152 pCache->TestOut.eflags = 0;
4153#endif
4154
4155 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
4156 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
4157 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
4158 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
4159 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
4160 aParam[5] = 0;
4161
4162#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4163 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
4164 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
4165#endif
4166 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnVMXGCStartVM64, 6, &aParam[0]);
4167
4168#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4169 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
4170 Assert(pCtx->dr[4] == 10);
4171 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
4172#endif
4173
4174#ifdef DEBUG
4175    AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
4176 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4177 pVCpu->hm.s.vmx.HCPhysVmcs));
4178 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4179 pCache->TestOut.HCPhysVmcs));
4180 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
4181 pCache->TestOut.pCache));
4182 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
4183 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
4184 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
4185 pCache->TestOut.pCtx));
4186 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
4187#endif
4188 return rc;
4189}
4190
4191
4192/**
4193 * Initialize the VMCS-Read cache. The VMCS cache is used for 32-bit hosts
4194 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
4195 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
4196 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
4197 *
4198 * @returns VBox status code.
4199 * @param pVM Pointer to the VM.
4200 * @param pVCpu Pointer to the VMCPU.
4201 */
4202static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
4203{
4204#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
4205{ \
4206 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
4207 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
4208 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
4209}
4210
4211#define VMXLOCAL_INIT_VMCS_SELREG(REG, pCache) \
4212{ \
4213 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS16_GUEST_FIELD_##REG); \
4214 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS32_GUEST_##REG##_LIMIT); \
4215 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_##REG##_BASE); \
4216 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS); \
4217}
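/* Annotation (illustrative, not in the original source): a single
   VMXLOCAL_INIT_VMCS_SELREG(ES, pCache) expands to four VMXLOCAL_INIT_READ_CACHE_FIELD()
   invocations, registering the ES selector, limit, base and access-rights encodings:

       VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS16_GUEST_FIELD_ES);
       VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS32_GUEST_ES_LIMIT);
       VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
       VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS);
*/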
4218
4219 AssertPtr(pVM);
4220 AssertPtr(pVCpu);
4221 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4222
4223 /* 16-bit guest-state fields (16-bit selectors and their corresponding 32-bit limit & 32-bit access-rights fields). */
4224 VMXLOCAL_INIT_VMCS_SELREG(ES, pCache);
4225 VMXLOCAL_INIT_VMCS_SELREG(CS, pCache);
4226 VMXLOCAL_INIT_VMCS_SELREG(SS, pCache);
4227 VMXLOCAL_INIT_VMCS_SELREG(DS, pCache);
4228 VMXLOCAL_INIT_VMCS_SELREG(FS, pCache);
4229 VMXLOCAL_INIT_VMCS_SELREG(GS, pCache);
4230 VMXLOCAL_INIT_VMCS_SELREG(LDTR, pCache);
4231 VMXLOCAL_INIT_VMCS_SELREG(TR, pCache);
4232
4233 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
4234#if 0
4235 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
4236 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
4237 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
4238 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
4239 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
4240 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
4241 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
4242 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
4243 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
4244#endif
4245
4246 /* 32-bit guest-state fields. */
4247 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS32_GUEST_GDTR_LIMIT);
4248 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS32_GUEST_IDTR_LIMIT);
4249 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE);
4250 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS32_GUEST_SYSENTER_CS);
4251 /* Unused 32-bit guest-state fields. */
4252#if 0
4253 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS32_GUEST_ACTIVITY_STATE);
4254 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS32_GUEST_SMBASE);
4255 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS32_GUEST_PREEMPTION_TIMER_VALUE);
4256#endif
4257
4258 /* Natural width guest-state fields. */
4259 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
4260 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
4261 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
4262 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
4263 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
4264 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
4265 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
4266 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
4267 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
4268 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
4269 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
4270 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
4271 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DR7);
4272 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
4273 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
4274 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RFLAGS);
4275 /* Unused natural width guest-state fields. */
4276#if 0
4277 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS);
4278 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
4279#endif
4280 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
4281 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
4282
4283 if (pVM->hm.s.fNestedPaging)
4284 {
4285 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
4286 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL);
4287 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
4288 }
4289 else
4290 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
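/* Annotation (assumption, not from the original source): VMX_VMCS_MAX_CACHE_IDX and
   VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX are presumably defined to match the number of
   fields registered above for the two layouts, so that cValidEntries covers exactly the
   aField[] slots initialized here. */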
4291
4292#undef VMXLOCAL_INIT_VMCS_SELREG
4293#undef VMXLOCAL_INIT_READ_CACHE_FIELD
4294 return VINF_SUCCESS;
4295}
4296
4297
4298/**
4299 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
4300 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
4301 * Darwin, running 64-bit guests).
4302 *
4303 * @returns VBox status code.
4304 * @param pVCpu Pointer to the VMCPU.
4305 * @param idxField The VMCS field encoding.
4306 * @param u64Val The 16-, 32- or 64-bit value to write.
4307 */
4308VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4309{
4310 int rc;
4311 switch (idxField)
4312 {
4313 /*
4314 * These fields consist of a "FULL" and a "HIGH" part which can be written individually.
4315 */
4316 /* 64-bit Control fields. */
4317 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
4318 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
4319 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
4320 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
4321 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
4322 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
4323 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
4324 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
4325 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
4326 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
4327 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
4328 case VMX_VMCS64_CTRL_EPTP_FULL:
4329 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
4330 /* 64-bit Read-only data fields. */
4331 case VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL:
4332 /* 64-bit Guest-state fields. */
4333 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
4334 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
4335 case VMX_VMCS64_GUEST_PAT_FULL:
4336 case VMX_VMCS64_GUEST_EFER_FULL:
4337 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
4338 case VMX_VMCS64_GUEST_PDPTE0_FULL:
4339 case VMX_VMCS64_GUEST_PDPTE1_FULL:
4340 case VMX_VMCS64_GUEST_PDPTE2_FULL:
4341 case VMX_VMCS64_GUEST_PDPTE3_FULL:
4342 /* 64-bit Host-state fields. */
4343 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
4344 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
4345 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
4346 {
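            /* Annotation (not in the original source): for VMCS fields with FULL/HIGH halves,
               the HIGH half is encoded as the FULL encoding + 1, which is why the upper
               32 bits are written with a second VMXWriteVmcs32() to idxField + 1. */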
4347 rc = VMXWriteVmcs32(idxField, u64Val);
4348 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32ULL)); /* The "ULL" suffix on the shift count is unnecessary (u64Val is already 64-bit) but harmless. */
4349 break;
4350 }
4351
4352 /*
4353 * These fields do not have FULL and HIGH parts. For 64-bit values the VMWRITE is queued up in the VMCS
4354 * write-cache; the queued VMWRITEs are executed once the host is switched to 64-bit mode for running 64-bit guests.
4355 */
4356 /* Natural-width Guest-state fields. */
4357 case VMX_VMCS_GUEST_CR0:
4358 case VMX_VMCS_GUEST_CR3:
4359 case VMX_VMCS_GUEST_CR4:
4360 case VMX_VMCS_GUEST_ES_BASE:
4361 case VMX_VMCS_GUEST_CS_BASE:
4362 case VMX_VMCS_GUEST_SS_BASE:
4363 case VMX_VMCS_GUEST_DS_BASE:
4364 case VMX_VMCS_GUEST_FS_BASE:
4365 case VMX_VMCS_GUEST_GS_BASE:
4366 case VMX_VMCS_GUEST_LDTR_BASE:
4367 case VMX_VMCS_GUEST_TR_BASE:
4368 case VMX_VMCS_GUEST_GDTR_BASE:
4369 case VMX_VMCS_GUEST_IDTR_BASE:
4370 case VMX_VMCS_GUEST_DR7:
4371 case VMX_VMCS_GUEST_RSP:
4372 case VMX_VMCS_GUEST_RIP:
4373 case VMX_VMCS_GUEST_RFLAGS:
4374 case VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS:
4375 case VMX_VMCS_GUEST_SYSENTER_ESP:
4376 case VMX_VMCS_GUEST_SYSENTER_EIP:
4377 {
4378 if ((u64Val >> 32ULL) == 0)
4379 {
4380 /* If this field is 64-bit, VT-x will zero out the top bits. */
4381 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
4382 }
4383 else
4384 {
4385 /* Assert that only the 32->64 switcher case should ever come here. */
4386 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
4387 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
4388 }
4389 break;
4390 }
4391
4392 default:
4393 {
4394 AssertMsgFailed(("VMXWriteVmcs64Ex: invalid field %#x (pVCpu=%p u64Val=%RX64)\n", (unsigned)idxField, pVCpu, u64Val));
4395 rc = VERR_INVALID_PARAMETER;
4396 break;
4397 }
4398 }
4399 AssertRCReturn(rc, rc);
4400 return rc;
4401}
4402
4403
4404/**
4405 * Queues up a VMWRITE by using the VMCS write cache. This is only used on 32-bit
4406 * hosts (except Darwin) running 64-bit guests.
4407 *
4408 * @param pVCpu Pointer to the VMCPU.
4409 * @param idxField The VMCS field encoding.
4410 * @param u64Val The 16-, 32- or 64-bit value to write.
4411 */
4412VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4413{
4414 AssertPtr(pVCpu);
4415 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4416
4417 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
4418 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
4419
4420 /* Make sure there are no duplicates. */
4421 for (unsigned i = 0; i < pCache->Write.cValidEntries; i++)
4422 {
4423 if (pCache->Write.aField[i] == idxField)
4424 {
4425 pCache->Write.aFieldVal[i] = u64Val;
4426 return VINF_SUCCESS;
4427 }
4428 }
4429
4430 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
4431 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
4432 pCache->Write.cValidEntries++;
4433 return VINF_SUCCESS;
4434}
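/* Annotation (not in the original source): entries queued by VMXWriteCachedVmcsEx() are
   flushed to the CPU by VMXWriteCachedVmcsLoad() below, which performs the actual VMWRITEs
   once the host has been switched to 64-bit mode and then resets Write.cValidEntries to 0. */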
4435
4436
4437/**
4438 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
4439 *
4440 * @param pVCpu Pointer to the VMCPU.
4441 * @param pCache Pointer to the VMCS cache.
4442 *
4443 * @remarks No-long-jump zone!!!
4444 */
4445VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
4446{
4447 AssertPtr(pCache);
4448 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4449 {
4450 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
4451 AssertRC(rc);
4452 }
4453 pCache->Write.cValidEntries = 0;
4454}
4455
4456
4457/**
4458 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
4459 *
4460 * @param pVCpu Pointer to the VMCPU.
4461 * @param pCache Pointer to the VMCS cache.
4462 *
4463 * @remarks No-long-jump zone!!!
4464 */
4465VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
4466{
4467 AssertPtr(pCache);
4468 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
4469 {
4470 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
4471 AssertRC(rc);
4472 }
4473}
4474#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
4475
4476
4477/**
4478 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
4479 * not possible, causes VM-exits on RDTSC(P). Also sets up the VMX preemption
4480 * timer.
4481 *
4482 * @returns VBox status code.
4483 * @param pVM Pointer to the VM.
4484 * @param pVCpu Pointer to the VMCPU.
4485 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4486 * out-of-sync. Make sure to update the required fields
4487 * before using them.
4488 * @remarks No-long-jump zone!!!
4489 */
4490static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4491{
4492 int rc = VERR_INTERNAL_ERROR_5;
4493 bool fOffsettedTsc = false;
4494 if (pVM->hm.s.vmx.fUsePreemptTimer)
4495 {
4496 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
4497
4498 /* Make sure the returned values have sane upper and lower boundaries. */
4499 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
4500 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
4501 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
4502 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
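        /* Annotation (illustrative, not from the original source): the VMX preemption timer
           counts down at the TSC rate divided by 2^cPreemptTimerShift, so the TSC-tick
           deadline is converted to timer ticks by this shift. E.g. with a hypothetical
           2 GHz TSC and a shift of 5, a 1/64th-second deadline of 31,250,000 TSC ticks
           becomes roughly 976,562 preemption-timer ticks. */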
4503
4504 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
4505 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPTION_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
4506 }
4507 else
4508 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
4509
4510 if (fOffsettedTsc)
4511 {
4512 uint64_t u64CurTSC = ASMReadTSC();
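        /* Annotation (not in the original source): with TSC offsetting the guest reads
           host TSC + u64TSCOffset on RDTSC. The check below only allows offsetting when that
           value would not be below the last TSC value the guest has already seen; otherwise
           RDTSC(P) is intercepted so the guest never observes a TSC that goes backwards. */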
4513 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
4514 {
4515 /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
4516 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
4517
4518 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
4519 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4520 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4521 }
4522 else
4523 {
4524 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
4525 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
4526 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4527 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
4528 }
4529 }
4530 else
4531 {
4532 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
4533 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
4534 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4535 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4536 }
4537}
4538
4539
4540/**
4541 * Determines if an exception is a benign exception. Benign exceptions
4542 * are ones which cannot cause double-faults.
4543 *
4544 * @returns true if the exception is benign, false otherwise.
4545 * @param uVector The exception vector.
4546 */
4547DECLINLINE(bool) hmR0VmxIsBenignXcpt(const uint32_t uVector)
4548{
4549 switch (uVector)
4550 {
4551 case X86_XCPT_DB:
4552 case X86_XCPT_NMI:
4553 case X86_XCPT_BP:
4554 case X86_XCPT_OF:
4555 case X86_XCPT_BR:
4556 case X86_XCPT_UD:
4557 case X86_XCPT_NM:
4558 case X86_XCPT_CO_SEG_OVERRUN:
4559 case X86_XCPT_MF:
4560 case X86_XCPT_AC:
4561 case X86_XCPT_MC:
4562 case X86_XCPT_XF:
4563 return true;
4564 default:
4565 return false;
4566 }
4567}
4568
4569
4570/**
4571 * Determines if an exception is a contributory exception. Contributory
4572 * exceptions are ones which can cause double-faults.
4573 *
4574 * @returns true if the exception is contributory, false otherwise.
4575 * @param uVector The exception vector.
4576 */
4577DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
4578{
4579 switch (uVector)
4580 {
4581 case X86_XCPT_GP:
4582 case X86_XCPT_SS:
4583 case X86_XCPT_NP:
4584 case X86_XCPT_TS:
4585 case X86_XCPT_DE:
4586 return true;
4587 default:
4588 return false;
4589 }
4590 return false;
4591}
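/* Annotation (paraphrasing the double-fault rules in the Intel SDM, added for clarity):
   roughly, a #DF is generated when a contributory exception occurs while delivering another
   contributory exception, or when a #PF occurs while delivering a contributory exception or
   another #PF; combinations involving a benign exception are simply delivered one after the
   other. This is the classification hmR0VmxCheckExitDueToEventDelivery() below relies on. */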
4592
4593
4594/**
4595 * Determines if we are intercepting any contributory exceptions.
4596 *
4597 * @returns true if we are intercepting any contributory exception, false
4598 * otherwise.
4599 * @param pVCpu Pointer to the VMCPU.
4600 */
4601DECLINLINE(bool) hmR0VmxInterceptingContributoryXcpts(PVMCPU pVCpu)
4602{
4603 if ( (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_GP))
4604 || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_SS))
4605 || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_NP))
4606 || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_TS))
4607 || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_DE)))
4608 {
4609 return true;
4610 }
4611 return false;
4612}
4613
4614
4615/**
4616 * Handles a condition that occurred while delivering an event through the guest
4617 * IDT.
4618 *
4619 * @returns VBox status code (informational error codes included).
4620 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
4621 * @retval VINF_VMX_DOUBLE_FAULT if a #DF condition was detected and we ought to
4622 * continue execution of the guest, which will deliver the #DF.
4623 * @retval VINF_EM_RESET if we detected a triple-fault condition.
4624 *
4625 * @param pVM Pointer to the VM.
4626 * @param pVCpu Pointer to the VMCPU.
4627 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4628 * out-of-sync. Make sure to update the required fields
4629 * before using them.
4630 * @param pVmxTransient Pointer to the VMX transient structure.
4631 *
4632 * @remarks No-long-jump zone!!!
4633 */
4634static int hmR0VmxCheckExitDueToEventDelivery(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
4635{
4636 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
4637 AssertRCReturn(rc, rc);
4638 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
4639 {
4640 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
4641 AssertRCReturn(rc, rc);
4642
4643 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
4644 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
4645 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
4646
4647 typedef enum
4648 {
4649 VMXREFLECTXCPT_XCPT, /* Reflect Idt-vectoring exception. */
4650 VMXREFLECTXCPT_DF, /* Reflect a double-fault to the guest. */
4651 VMXREFLECTXCPT_TF, /* Reflect a triple fault state to the VMM. */
4652 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
4653 } VMXREFLECTXCPT;
4654
4655 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
4656 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
4657 if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
4658 {
4659 if ( hmR0VmxIsBenignXcpt(uIdtVector)
4660 || hmR0VmxIsBenignXcpt(uExitVector)
4661 || ( hmR0VmxIsContributoryXcpt(uIdtVector)
4662 && uExitVector == X86_XCPT_PF))
4663 {
4664 enmReflect = VMXREFLECTXCPT_XCPT;
4665 }
4666 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF))
4667 && uIdtVector == X86_XCPT_PF
4668 && uExitVector == X86_XCPT_PF)
4669 {
4670 pVmxTransient->fVectoringPF = true;
4671 }
4672 else if ( hmR0VmxIsContributoryXcpt(uIdtVector)
4673 && hmR0VmxIsContributoryXcpt(uExitVector))
4674 {
4675 enmReflect = VMXREFLECTXCPT_DF;
4676 }
4677 else if ( hmR0VmxInterceptingContributoryXcpts(pVCpu)
4678 && uIdtVector == X86_XCPT_PF
4679 && hmR0VmxIsContributoryXcpt(uExitVector))
4680 {
4681 enmReflect = VMXREFLECTXCPT_DF;
4682 }
4683 else if (uIdtVector == X86_XCPT_DF)
4684 enmReflect = VMXREFLECTXCPT_TF;
4685 }
4686 else if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4687 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
4688 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
4689 {
4690 /*
4691 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and privileged software exceptions
4692 * (i.e. #DB raised by INT1/ICEBP) as they are re-raised when the instruction is restarted.
4693 */
4694 enmReflect = VMXREFLECTXCPT_XCPT;
4695 }
4696
4697 Assert(pVmxTransient->fVectoringPF == false || enmReflect == VMXREFLECTXCPT_NONE);
4698 switch (enmReflect)
4699 {
4700 case VMXREFLECTXCPT_XCPT:
4701 {
4702 Assert(!pVCpu->hm.s.Event.fPending);
4703 pVCpu->hm.s.Event.fPending = true;
4704 pVCpu->hm.s.Event.u64IntrInfo = VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo);
4705 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo))
4706 {
4707 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
4708 AssertRCReturn(rc, rc);
4709 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
4710 }
4711 else
4712 pVCpu->hm.s.Event.u32ErrCode = 0;
4713 Log(("Pending event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode));
4714 break;
4715 }
4716
4717 case VMXREFLECTXCPT_DF:
4718 {
4719 uint32_t u32IntrInfo;
4720 u32IntrInfo = X86_XCPT_DF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
4721 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
4722 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
4723
4724 Assert(!pVCpu->hm.s.Event.fPending);
4725 pVCpu->hm.s.Event.fPending = true;
4726 pVCpu->hm.s.Event.u64IntrInfo = u32IntrInfo;
4727 pVCpu->hm.s.Event.u32ErrCode = 0;
4728 rc = VINF_VMX_DOUBLE_FAULT;
4729 Log(("Pending #DF %#RX64 uIdt=%#x uExit=%#x\n", pVCpu->hm.s.Event.u64IntrInfo, uIdtVector, uExitVector));
4730 break;
4731 }
4732
4733 case VMXREFLECTXCPT_TF:
4734 {
4735 Log(("Pending triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
4736 rc = VINF_EM_RESET;
4737 break;
4738 }
4739
4740 default: /* shut up gcc. */
4741 break;
4742 }
4743 }
4744 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || rc == VINF_VMX_DOUBLE_FAULT);
4745 return rc;
4746}
4747
4748
4749/**
4750 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
4751 *
4752 * @returns VBox status code.
4753 * @param pVM Pointer to the VM.
4754 * @param pVCpu Pointer to the VMCPU.
4755 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4756 * out-of-sync. Make sure to update the required fields
4757 * before using them.
4758 *
4759 * @remarks No-long-jump zone!!!
4760 */
4761DECLINLINE(int) hmR0VmxSaveGuestCR0(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4762{
4763 int rc = VINF_SUCCESS;
4764 if ( !(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_CR0)
4765 || !(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_FPU))
4766 {
4767 RTGCUINTREG uVal = 0;
4768 RTCCUINTREG uShadow = 0;
4769 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR0, &uVal);
4770 rc |= VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
4771 AssertRCReturn(rc, rc);
4772 uVal = (uShadow & pVCpu->hm.s.vmx.cr0_mask) | (uVal & ~pVCpu->hm.s.vmx.cr0_mask);
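        /* Annotation (not in the original source): cr0_mask is presumably the CR0 guest/host
           mask programmed into the VMCS; for bits owned by the host (mask bit set) the
           guest-visible value lives in the CR0 read shadow, while the remaining bits come
           from the real guest CR0 field, hence the merge above. */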
4773 CPUMSetGuestCR0(pVCpu, uVal);
4774 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_CR0 | VMX_UPDATED_GUEST_FPU;
4775 }
4776 return rc;
4777}
4778
4779
4780/**
4781 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
4782 *
4783 * @returns VBox status code.
4784 * @param pVM Pointer to the VM.
4785 * @param pVCpu Pointer to the VMCPU.
4786 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4787 * out-of-sync. Make sure to update the required fields
4788 * before using them.
4789 *
4790 * @remarks No-long-jump zone!!!
4791 */
4792DECLINLINE(int) hmR0VmxSaveGuestCR4(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4793{
4794 int rc = VINF_SUCCESS;
4795 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_CR4))
4796 {
4797 RTGCUINTREG uVal = 0;
4798 RTCCUINTREG uShadow = 0;
4799 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR4, &uVal);
4800 rc |= VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
4801 AssertRCReturn(rc, rc);
4802 uVal = (uShadow & pVCpu->hm.s.vmx.cr4_mask) | (uVal & ~pVCpu->hm.s.vmx.cr4_mask);
4803 CPUMSetGuestCR4(pVCpu, uVal);
4804 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_CR4;
4805 }
4806 return rc;
4807}
4808
4809
4810/**
4811 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
4812 *
4813 * @returns VBox status code.
4814 * @param pVM Pointer to the VM.
4815 * @param pVCpu Pointer to the VMCPU.
4816 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4817 * out-of-sync. Make sure to update the required fields
4818 * before using them.
4819 *
4820 * @remarks No-long-jump zone!!!
4821 */
4822DECLINLINE(int) hmR0VmxSaveGuestRip(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4823{
4824 if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_RIP)
4825 return VINF_SUCCESS;
4826
4827 RTGCUINTREG uVal = 0;
4828 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uVal);
4829 AssertRCReturn(rc, rc);
4830 pMixedCtx->rip = uVal;
4831 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_RIP;
4832 return rc;
4833}
4834
4835
4836/**
4837 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
4838 *
4839 * @returns VBox status code.
4840 * @param pVM Pointer to the VM.
4841 * @param pVCpu Pointer to the VMCPU.
4842 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4843 * out-of-sync. Make sure to update the required fields
4844 * before using them.
4845 *
4846 * @remarks No-long-jump zone!!!
4847 */
4848DECLINLINE(int) hmR0VmxSaveGuestRsp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4849{
4850 if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_RSP)
4851 return VINF_SUCCESS;
4852
4853 RTGCUINTREG uVal = 0;
4854 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uVal);
4855 AssertRCReturn(rc, rc);
4856 pMixedCtx->rsp = uVal;
4857 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_RSP;
4858 return rc;
4859}
4860
4861
4862/**
4863 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
4864 *
4865 * @returns VBox status code.
4866 * @param pVM Pointer to the VM.
4867 * @param pVCpu Pointer to the VMCPU.
4868 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4869 * out-of-sync. Make sure to update the required fields
4870 * before using them.
4871 *
4872 * @remarks No-long-jump zone!!!
4873 */
4874DECLINLINE(int) hmR0VmxSaveGuestRflags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4875{
4876 if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_RFLAGS)
4877 return VINF_SUCCESS;
4878
4879 RTGCUINTREG uVal = 0;
4880 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RFLAGS, &uVal);
4881 AssertRCReturn(rc, rc);
4882 pMixedCtx->rflags.u64 = uVal;
4883
4884 /* Undo our real-on-v86-mode changes to eflags if necessary. */
4885 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4886 {
4887 Assert(pVM->hm.s.vmx.pRealModeTSS);
4888 Log(("Saving real-mode RFLAGS VT-x view=%#RX64\n", pMixedCtx->rflags.u64));
4889 pMixedCtx->eflags.Bits.u1VM = 0;
4890 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
4891 }
4892
4893 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_RFLAGS;
4894 return rc;
4895}
4896
4897
4898/**
4899 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
4900 * guest-CPU context.
4901 */
4902static int hmR0VmxSaveGuestGprs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4903{
4904 int rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
4905 rc |= hmR0VmxSaveGuestRsp(pVM, pVCpu, pMixedCtx);
4906 rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
4907 return rc;
4908}
4909
4910
4911/**
4912 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
4913 * from the guest-state area in the VMCS.
4914 *
4915 * @param pVM Pointer to the VM.
4916 * @param pVCpu Pointer to the VMCPU.
4917 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4918 * out-of-sync. Make sure to update the required fields
4919 * before using them.
4920 *
4921 * @remarks No-long-jump zone!!!
4922 */
4923DECLINLINE(void) hmR0VmxSaveGuestIntrState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4924{
4925 uint32_t uIntrState = 0;
4926 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
4927 AssertRC(rc);
4928
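    /* Annotation (not in the original source): in the interruptibility-state field, bit 0
       signals blocking by STI and bit 1 blocking by MOV SS (bits 2 and 3 cover SMI and NMI
       blocking, which are not handled here); hence the Assert below only expects the
       STI/MOV SS encodings. */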
4929 if (!uIntrState)
4930 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
4931 else
4932 {
4933 Assert( uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
4934 || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
4935 rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
4936 rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx); /* for hmR0VmxLoadGuestIntrState(). */
4937 AssertRC(rc);
4938 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
4939 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
4940 }
4941}
4942
4943
4944/**
4945 * Saves the guest's activity state.
4946 *
4947 * @returns VBox status code.
4948 * @param pVM Pointer to the VM.
4949 * @param pVCpu Pointer to the VMCPU.
4950 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4951 * out-of-sync. Make sure to update the required fields
4952 * before using them.
4953 *
4954 * @remarks No-long-jump zone!!!
4955 */
4956DECLINLINE(int) hmR0VmxSaveGuestActivityState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4957{
4958 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
4959 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_ACTIVITY_STATE;
4960 return VINF_SUCCESS;
4961}
4962
4963
4964/**
4965 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
4966 * the current VMCS into the guest-CPU context.
4967 *
4968 * @returns VBox status code.
4969 * @param pVM Pointer to the VM.
4970 * @param pVCpu Pointer to the VMCPU.
4971 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4972 * out-of-sync. Make sure to update the required fields
4973 * before using them.
4974 *
4975 * @remarks No-long-jump zone!!!
4976 */
4977DECLINLINE(int) hmR0VmxSaveGuestSysenterMsrs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4978{
4979 int rc = VINF_SUCCESS;
4980 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_SYSENTER_CS_MSR))
4981 {
4982 uint32_t u32Val = 0;
4983 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
4984 pMixedCtx->SysEnter.cs = u32Val;
4985 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_SYSENTER_CS_MSR;
4986 }
4987
4988 RTGCUINTREG uGCVal = 0;
4989 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
4990 {
4991 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &uGCVal); AssertRCReturn(rc, rc);
4992 pMixedCtx->SysEnter.eip = uGCVal;
4993 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_SYSENTER_EIP_MSR;
4994 }
4995 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
4996 {
4997 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &uGCVal); AssertRCReturn(rc, rc);
4998 pMixedCtx->SysEnter.esp = uGCVal;
4999 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_SYSENTER_ESP_MSR;
5000 }
5001 return rc;
5002}
5003
5004
5005/**
5006 * Saves the guest FS_BASE MSR from the current VMCS into the guest-CPU
5007 * context.
5008 *
5009 * @returns VBox status code.
5010 * @param pVM Pointer to the VM.
5011 * @param pVCpu Pointer to the VMCPU.
5012 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5013 * out-of-sync. Make sure to update the required fields
5014 * before using them.
5015 *
5016 * @remarks No-long-jump zone!!!
5017 */
5018DECLINLINE(int) hmR0VmxSaveGuestFSBaseMsr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5019{
5020 RTGCUINTREG uVal = 0;
5021 int rc = VINF_SUCCESS;
5022 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_FS_BASE_MSR))
5023 {
5024 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &uVal); AssertRCReturn(rc, rc);
5025 pMixedCtx->fs.u64Base = uVal;
5026 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_FS_BASE_MSR;
5027 }
5028 return rc;
5029}
5030
5031
5032/**
5033 * Saves the guest GS_BASE MSR from the current VMCS into the guest-CPU
5034 * context.
5035 *
5036 * @returns VBox status code.
5037 * @param pVM Pointer to the VM.
5038 * @param pVCpu Pointer to the VMCPU.
5039 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5040 * out-of-sync. Make sure to update the required fields
5041 * before using them.
5042 *
5043 * @remarks No-long-jump zone!!!
5044 */
5045DECLINLINE(int) hmR0VmxSaveGuestGSBaseMsr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5046{
5047 RTGCUINTREG uVal = 0;
5048 int rc = VINF_SUCCESS;
5049 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_GS_BASE_MSR))
5050 {
5051 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &uVal); AssertRCReturn(rc, rc);
5052 pMixedCtx->gs.u64Base = uVal;
5053 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_GS_BASE_MSR;
5054 }
5055 return rc;
5056}
5057
5058
5059/**
5060 * Saves the auto load/store'd guest MSRs from the current VMCS into the
5061 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK and TSC_AUX.
5062 *
5063 * @returns VBox status code.
5064 * @param pVM Pointer to the VM.
5065 * @param pVCpu Pointer to the VMCPU.
5066 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5067 * out-of-sync. Make sure to update the required fields
5068 * before using them.
5069 *
5070 * @remarks No-long-jump zone!!!
5071 */
5072static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5073{
5074 if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
5075 return VINF_SUCCESS;
5076
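    /* Annotation (not in the original source): pvGuestMsr points to the VMX auto load/store
       MSR area; each entry is a VMXMSR record holding the MSR index and the 64-bit value
       stored by the CPU on VM-exit, which is why a simple index/value switch suffices below. */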
5077 for (unsigned i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
5078 {
5079 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
5080 pMsr += i;
5081 switch (pMsr->u32IndexMSR)
5082 {
5083 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
5084 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
5085 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
5086 case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
5087#if 0
5088 /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
5089 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
5090#endif
5091 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;
5092 default:
5093 {
5094 AssertFailed();
5095 return VERR_HM_UNEXPECTED_LD_ST_MSR;
5096 }
5097 }
5098 }
5099 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
5100 return VINF_SUCCESS;
5101}
5102
5103
5104/**
5105 * Saves the guest control registers from the current VMCS into the guest-CPU
5106 * context.
5107 *
5108 * @returns VBox status code.
5109 * @param pVM Pointer to the VM.
5110 * @param pVCpu Pointer to the VMCPU.
5111 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5112 * out-of-sync. Make sure to update the required fields
5113 * before using them.
5114 *
5115 * @remarks No-long-jump zone!!!
5116 */
5117DECLINLINE(int) hmR0VmxSaveGuestControlRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5118{
5119 RTGCUINTREG uVal = 0;
5120 RTGCUINTREG uShadow = 0;
5121 int rc = VINF_SUCCESS;
5122
5123 /* Guest CR0. Guest FPU. */
5124 rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
5125
5126 /* Guest CR4. */
5127 rc |= hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);
5128 AssertRCReturn(rc, rc);
5129
5130 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
5131 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_CR3))
5132 {
5133 if ( pVM->hm.s.fNestedPaging
5134 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
5135 {
5136 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &uVal);
5137 if (pMixedCtx->cr3 != uVal)
5138 {
5139 CPUMSetGuestCR3(pVCpu, uVal);
5140 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3(). */
5141 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
5142 }
5143
5144 /* We require EFER to check PAE mode. */
5145 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
5146
5147 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
5148 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR. */
5149 {
5150 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
5151 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
5152 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
5153 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
5154 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
5155 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
5156 }
5157 AssertRCReturn(rc, rc);
5158 }
5159 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_CR3;
5160 }
5161 return rc;
5162}
5163
5164
5165/**
5166 * Reads a guest segment register from the current VMCS into the guest-CPU
5167 * context.
5168 *
5169 * @returns VBox status code.
5170 * @param pVCpu Pointer to the VMCPU.
5171 * @param idxSel Index of the selector in the VMCS.
5172 * @param idxLimit Index of the segment limit in the VMCS.
5173 * @param idxBase Index of the segment base in the VMCS.
5174 * @param idxAccess Index of the access rights of the segment in the VMCS.
5175 * @param pSelReg Pointer to the segment selector.
5176 *
5177 * @remarks No-long-jump zone!!!
5178 * @remarks Never call this function directly. Use the VMXLOCAL_READ_SEG() macro
5179 * as that takes care of whether to read from the VMCS cache or not.
5180 */
5181DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
5182 PCPUMSELREG pSelReg)
5183{
5184 uint32_t u32Val = 0;
5185 int rc = VMXReadVmcs32(idxSel, &u32Val);
5186 pSelReg->Sel = (uint16_t)u32Val;
5187 pSelReg->ValidSel = (uint16_t)u32Val;
5188 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
5189
5190 rc |= VMXReadVmcs32(idxLimit, &u32Val);
5191 pSelReg->u32Limit = u32Val;
5192
5193 RTGCUINTREG uGCVal = 0;
5194 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &uGCVal);
5195 pSelReg->u64Base = uGCVal;
5196
5197 rc |= VMXReadVmcs32(idxAccess, &u32Val);
5198 pSelReg->Attr.u = u32Val;
5199 AssertRCReturn(rc, rc);
5200
5201 /*
5202 * If VT-x marks the segment as unusable, the rest of the attributes are undefined.
5203 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
5204 */
5205 if (pSelReg->Attr.u & VMX_SEL_UNUSABLE)
5206 {
5207 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR);
5208 pSelReg->Attr.u = VMX_SEL_UNUSABLE;
5209 }
5210 return rc;
5211}
5212
5213
5214/**
5215 * Saves the guest segment registers from the current VMCS into the guest-CPU
5216 * context.
5217 *
5218 * @returns VBox status code.
5219 * @param pVM Pointer to the VM.
5220 * @param pVCpu Pointer to the VMCPU.
5221 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5222 * out-of-sync. Make sure to update the required fields
5223 * before using them.
5224 *
5225 * @remarks No-long-jump zone!!!
5226 */
5227static int hmR0VmxSaveGuestSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5228{
5229#ifdef VMX_USE_CACHED_VMCS_ACCESSES
5230#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5231 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5232 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5233#else
5234#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5235 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5236 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5237#endif
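/* Annotation (illustrative, not in the original source): without cached accesses,
   VMXLOCAL_READ_SEG(CS, cs) expands to roughly
       hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT,
                             VMX_VMCS_GUEST_CS_BASE, VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
                             &pMixedCtx->cs);
   with the cached variant only the base-field index differs (the *_BASE_CACHE_IDX encoding). */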
5238
5239 int rc = VINF_SUCCESS;
5240
5241 /* Guest segment registers. */
5242 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_SEGMENT_REGS))
5243 {
5244 rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
5245 rc |= VMXLOCAL_READ_SEG(CS, cs);
5246 rc |= VMXLOCAL_READ_SEG(SS, ss);
5247 rc |= VMXLOCAL_READ_SEG(DS, ds);
5248 rc |= VMXLOCAL_READ_SEG(ES, es);
5249 rc |= VMXLOCAL_READ_SEG(FS, fs);
5250 rc |= VMXLOCAL_READ_SEG(GS, gs);
5251 AssertRCReturn(rc, rc);
5252
5253 /* Restore segment attributes for real-on-v86 mode hack. */
5254 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5255 {
5256 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrCS.u;
5257 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrSS.u;
5258 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrDS.u;
5259 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrES.u;
5260 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrFS.u;
5261 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrGS.u;
5262 }
5263 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_SEGMENT_REGS;
5264 }
5265
5266 /* Guest LDTR. */
5267 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_LDTR))
5268 {
5269 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
5270 AssertRCReturn(rc, rc);
5271 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_LDTR;
5272 }
5273
5274 /* Guest GDTR. */
5275 RTGCUINTREG uGCVal = 0;
5276 uint32_t u32Val = 0;
5277 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_GDTR))
5278 {
5279 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &uGCVal);
5280 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5281 pMixedCtx->gdtr.pGdt = uGCVal;
5282 pMixedCtx->gdtr.cbGdt = u32Val;
5283 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_GDTR;
5284 }
5285
5286 /* Guest IDTR. */
5287 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_IDTR))
5288 {
5289 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &uGCVal);
5290 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5291 pMixedCtx->idtr.pIdt = uGCVal;
5292 pMixedCtx->idtr.cbIdt = u32Val;
5293 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_IDTR;
5294 }
5295
5296 /* Guest TR. */
5297 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_TR))
5298 {
5299 rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
5300
5301 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR; don't save the fake one. */
5302 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5303 rc |= VMXLOCAL_READ_SEG(TR, tr);
5304 AssertRCReturn(rc, rc);
5305 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_TR;
5306 }
5307 return rc;
5308}
5309
5310
5311/**
5312 * Saves the guest debug registers from the current VMCS into the guest-CPU
5313 * context.
5314 *
5315 * @returns VBox status code.
5316 * @param pVM Pointer to the VM.
5317 * @param pVCpu Pointer to the VMCPU.
5318 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5319 * out-of-sync. Make sure to update the required fields
5320 * before using them.
5321 *
5322 * @remarks No-long-jump zone!!!
5323 */
5324DECLINLINE(int) hmR0VmxSaveGuestDebugRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5325{
5326 int rc = VINF_SUCCESS;
5327 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_DEBUG))
5328 {
5329 RTGCUINTREG uVal;
5330 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_DR7, &uVal); AssertRCReturn(rc, rc);
5331 pMixedCtx->dr[7] = uVal;
5332
5333 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_DEBUG;
5334 }
5335 return rc;
5336}
5337
5338
5339/**
5340 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
5341 *
5342 * @returns VBox status code.
5343 * @param pVM Pointer to the VM.
5344 * @param pVCpu Pointer to the VMCPU.
5345 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5346 * out-of-sync. Make sure to update the required fields
5347 * before using them.
5348 *
5349 * @remarks No-long-jump zone!!!
5350 */
5351DECLINLINE(int) hmR0VmxSaveGuestApicState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5352{
5353 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
5354 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_APIC_STATE;
5355 return VINF_SUCCESS;
5356}
5357
5358
5359/**
5360 * Saves the entire guest state from the currently active VMCS into the
5361 * guest-CPU context. This essentially VMREADs all guest data.
5362 *
5363 * @returns VBox status code.
5364 * @param pVM Pointer to the VM.
5365 * @param pVCpu Pointer to the VMCPU.
5366 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5367 * out-of-sync. Make sure to update the required fields
5368 * before using them.
5369 */
5370static int hmR0VmxSaveGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5371{
5372 Assert(pVM);
5373 Assert(pVCpu);
5374 Assert(pMixedCtx);
5375
5376 if (pVCpu->hm.s.vmx.fUpdatedGuestState == VMX_UPDATED_GUEST_ALL)
5377 return VINF_SUCCESS;
5378
5379 VMMRZCallRing3Disable(pVCpu);
5380
5381 int rc = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
5382 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGprs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
5383
5384 rc = hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
5385 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
5386
5387 rc = hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
5388 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
5389
5390 rc = hmR0VmxSaveGuestDebugRegs(pVM, pVCpu, pMixedCtx);
5391 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
5392
5393 rc = hmR0VmxSaveGuestSysenterMsrs(pVM, pVCpu, pMixedCtx);
5394 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
5395
5396 rc = hmR0VmxSaveGuestFSBaseMsr(pVM, pVCpu, pMixedCtx);
5397 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
5398
5399 rc = hmR0VmxSaveGuestGSBaseMsr(pVM, pVCpu, pMixedCtx);
5400 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
5401
5402 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
5403 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
5404
5405 rc = hmR0VmxSaveGuestActivityState(pVM, pVCpu, pMixedCtx);
5406 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
5407
5408 rc = hmR0VmxSaveGuestApicState(pVM, pVCpu, pMixedCtx);
5409 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
5410
5411 AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == VMX_UPDATED_GUEST_ALL,
5412 ("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
5413
5414 VMMRZCallRing3Enable(pVCpu);
5415 return rc;
5416}
5417
5418
5419/**
5420 * Check per-VM and per-VCPU force flag actions that require us to go back to
5421 * ring-3 for one reason or another.
5422 *
5423 * @returns VBox status code (informational status codes included).
5424 * @retval VINF_SUCCESS if we don't have any actions that require going back to
5425 * ring-3.
5426 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
5427 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
5428 * interrupts)
5429 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
5430 * all EMTs to be in ring-3.
5431 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
5432 * @retval VINF_EM_NO_MEMORY if PGM is out of memory and we need to return
5433 * to the EM loop.
5434 *
5435 * @param pVM Pointer to the VM.
5436 * @param pVCpu Pointer to the VMCPU.
5437 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5438 * out-of-sync. Make sure to update the required fields
5439 * before using them.
5440 */
5441static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5442{
5443 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5444
5445 int rc = VERR_INTERNAL_ERROR_5;
5446 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
5447 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
5448 | VMCPU_FF_REQUEST | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES))
5449 {
5450 /* We need the control registers now, make sure the guest-CPU context is updated. */
5451 rc = hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
5452 AssertRCReturn(rc, rc);
5453
5454 /* Pending HM CR3 sync. */
5455 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5456 {
5457 rc = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
5458 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
5459 }
5460 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5461 {
5462 rc = PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5463 AssertRC(rc);
5464 }
5465
5466 /* Pending PGM CR3 sync. */
5467 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
5468 {
5469 rc = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
5470 if (rc != VINF_SUCCESS)
5471 {
5472 AssertRC(rc);
5473 Log2(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
5474 return rc;
5475 }
5476 }
5477
5478 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
5479 /* -XXX- what was that about single stepping? */
5480 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
5481 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
5482 {
5483 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
5484 rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
5485 Log2(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
5486 return rc;
5487 }
5488
5489 /* Pending VM request packets, such as hardware interrupts. */
5490 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
5491 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
5492 {
5493 Log2(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
5494 return VINF_EM_PENDING_REQUEST;
5495 }
5496
5497 /* Pending PGM pool flushes. */
5498 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
5499 {
5500 Log2(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
5501 return VINF_PGM_POOL_FLUSH_PENDING;
5502 }
5503
5504 /* Pending DMA requests. */
5505 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
5506 {
5507 Log2(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
5508 return VINF_EM_RAW_TO_R3;
5509 }
5510 }
5511
5512 /* Paranoia. */
5513 Assert(rc != VERR_EM_INTERPRETER);
5514 return VINF_SUCCESS;
5515}
5516
5517
5518/**
5519 * Converts any pending VMX event into a TRPM trap. Typically used when leaving
5520 * VT-x to execute any instruction.
5521 *
5522 * @param pVCpu Pointer to the VMCPU.
5523 */
5524static void hmR0VmxUpdateTRPMTrap(PVMCPU pVCpu)
5525{
5526 if (pVCpu->hm.s.Event.fPending)
5527 {
5528 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
5529 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo);
5530 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo);
5531 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
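        /* Annotation (not in the original source): u64IntrInfo follows the VT-x
           interruption-information layout: bits 7:0 hold the vector, bits 10:8 the event
           type, bit 11 the error-code-valid flag and bit 31 the valid flag, which is what
           the VMX_IDT_VECTORING_INFO_* accessors above decode. */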
5532
5533 /* If a trap was already pending, we did something wrong! */
5534 Assert(TRPMQueryTrap(pVCpu, NULL, NULL) == VERR_TRPM_NO_ACTIVE_TRAP);
5535
5536 /* A page-fault exception during a page-fault would become a double-fault. */
5537 AssertMsg(uVectorType != VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT || uVector != X86_XCPT_PF,
5538 ("%#RX64 uVectorType=%#x uVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo, uVectorType, uVector));
5539
5540 TRPMEVENT enmTrapType;
5541 switch (uVectorType)
5542 {
5543 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
5544 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
5545 enmTrapType = TRPM_HARDWARE_INT;
5546 break;
5547 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
5548 enmTrapType = TRPM_SOFTWARE_INT;
5549 break;
5550 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
5551 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
5552 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
5553 enmTrapType = TRPM_TRAP;
5554 break;
5555 default:
5556 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
5557 enmTrapType = TRPM_32BIT_HACK;
5558 break;
5559 }
5560 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
5561 AssertRC(rc);
5562 if (fErrorCodeValid)
5563 TRPMSetErrorCode(pVCpu, uErrorCode);
5564
5565 /* Clear the VT-x state bits now that TRPM has the information. */
5566 pVCpu->hm.s.Event.fPending = false;
5567 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
5568 AssertRC(rc);
5569 }
5570}
5571
5572
5573/**
5574 * Does the necessary state syncing before doing a longjmp to ring-3.
5575 *
5576 * @param pVM Pointer to the VM.
5577 * @param pVCpu Pointer to the VMCPU.
5578 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5579 * out-of-sync. Make sure to update the required fields
5580 * before using them.
5581 * @param rcExit The reason for exiting to ring-3. Can be
5582 * VINF_VMM_UNKNOWN_RING3_CALL.
5583 *
5584 * @remarks No-long-jmp zone!!!
5585 */
5586static void hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5587{
5588 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
5589
5590 int rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
5591 AssertRC(rc);
5592
5593 /* Restore FPU state if necessary and resync on next R0 reentry. */
5594 if (CPUMIsGuestFPUStateActive(pVCpu))
5595 {
5596 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
5597 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
5598 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
5599 }
5600
5601 /* Restore debug registers if necessary and resync on next R0 reentry. */
5602 if (CPUMIsGuestDebugStateActive(pVCpu))
5603 {
5604 CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
5605 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
5606 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
5607 }
5608 else if (CPUMIsHyperDebugStateActive(pVCpu))
5609 {
5610 CPUMR0LoadHostDebugState(pVM, pVCpu);
5611 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
5612 }
5613
5614 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5615}
5616
5617
5618/**
5619 * An action requires us to go back to ring-3. This function does the necessary
5620 * steps before we can safely return to ring-3. This is not the same as a longjmp
5621 * to ring-3; this exit is voluntary.
5622 *
5623 * @param pVM Pointer to the VM.
5624 * @param pVCpu Pointer to the VMCPU.
5625 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5626 * out-of-sync. Make sure to update the required fields
5627 * before using them.
5628 * @param rcExit The reason for exiting to ring-3. Can be
5629 * VINF_VMM_UNKNOWN_RING3_CALL.
5630 */
5631static void hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5632{
5633 Assert(pVM);
5634 Assert(pVCpu);
5635 Assert(pMixedCtx);
5636 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5637
5638 /* We want to see what the guest-state was before VM-entry; don't resync here as we will never continue guest execution. */
5639 if (rcExit == VERR_VMX_INVALID_GUEST_STATE)
5640 return;
5641
5642 /* Please, no longjmps here (a log flush could trigger a longjmp back to ring-3). NO LOGGING BEFORE THIS POINT! */
5643 VMMRZCallRing3Disable(pVCpu);
5644 Log(("hmR0VmxExitToRing3: rcExit=%d\n", rcExit));
5645
5646 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
5647 hmR0VmxUpdateTRPMTrap(pVCpu);
5648
5649 /* Sync the guest state. */
5650 hmR0VmxLongJmpToRing3(pVM, pVCpu, pMixedCtx, rcExit);
5651 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5652
5653 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
5654 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
5655 | CPUM_CHANGED_LDTR
5656 | CPUM_CHANGED_GDTR
5657 | CPUM_CHANGED_IDTR
5658 | CPUM_CHANGED_TR
5659 | CPUM_CHANGED_HIDDEN_SEL_REGS);
5660
5661 /* On our way back from ring-3 the following needs to be done. */
5662 /** @todo This can change with preemption hooks. */
5663 if (rcExit == VINF_EM_RAW_INTERRUPT)
5664 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
5665 else
5666 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
5667
5668 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
5669 VMMRZCallRing3Enable(pVCpu);
5670}
5671
5672
5673/**
5674 * VMMRZCallRing3 callback wrapper which saves the guest state before we
5675 * longjump to ring-3 and possibly get preempted.
5676 *
5677 * @param pVCpu Pointer to the VMCPU.
5678 * @param enmOperation The operation causing the ring-3 longjump.
5679 * @param pvUser The user argument (pointer to the possibly
5680 * out-of-date guest-CPU context).
5681 *
5682 * @remarks Must never be called with @a enmOperation ==
5683 * VMMCALLRING3_VM_R0_ASSERTION.
5684 */
5685DECLCALLBACK(void) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
5686{
5687 /* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
5688 Assert(pVCpu);
5689 Assert(pvUser);
5690 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5691 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5692
5693 VMMRZCallRing3Disable(pVCpu);
5694 Log(("hmR0VmxLongJmpToRing3\n"));
5695 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
5696 VMMRZCallRing3Enable(pVCpu);
5697}
5698
5699
5700/**
5701 * Injects any pending TRPM trap into the VM by updating the VMCS.
5702 *
5703 * @returns VBox status code (informational status code included).
5704 * @param pVM Pointer to the VM.
5705 * @param pVCpu Pointer to the VMCPU.
5706 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5707 * out-of-sync. Make sure to update the required fields
5708 * before using them.
5709 */
5710static int hmR0VmxInjectTRPMTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5711{
5712 if (!TRPMHasTrap(pVCpu))
5713 return VINF_SUCCESS;
5714
5715 uint8_t u8Vector = 0;
5716 TRPMEVENT enmTrpmEvent = TRPM_SOFTWARE_INT;
5717 RTGCUINT uErrCode = 0;
5718
5719 int rc = TRPMQueryTrapAll(pVCpu, &u8Vector, &enmTrpmEvent, &uErrCode, NULL /* puCr2 */);
5720 AssertRCReturn(rc, rc);
5721 Assert(enmTrpmEvent != TRPM_SOFTWARE_INT);
5722
5723 rc = TRPMResetTrap(pVCpu);
5724 AssertRCReturn(rc, rc);
5725
5726 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntrInfo. */
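    /* Field layout: bits 7:0 = vector, bits 10:8 = type, bit 11 = deliver-error-code, bit 31 = valid. */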
5727 uint32_t u32IntrInfo = u8Vector | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5728 if (enmTrpmEvent == TRPM_TRAP)
5729 {
5730 switch (u8Vector)
5731 {
5732 case X86_XCPT_BP:
5733 case X86_XCPT_OF:
5734 {
5735 /* These exceptions must be delivered as software exceptions. They have no error codes associated with them. */
5736 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5737 break;
5738 }
5739
5740 case X86_XCPT_DF:
5741 case X86_XCPT_TS:
5742 case X86_XCPT_NP:
5743 case X86_XCPT_SS:
5744 case X86_XCPT_GP:
5745 case X86_XCPT_PF:
5746 case X86_XCPT_AC:
5747 /* These exceptions must be delivered as hardware exceptions. They have error codes associated with
5748 them which VT-x/VMM pushes to the guest stack. */
5749 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5750 /* no break! */
5751 default:
5752 {
5753 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5754 break;
5755 }
5756 }
5757 }
5758 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
5759 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5760 else
5761 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
5762
5763 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
5764 return hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, uErrCode);
5765}
5766
5767
5768/**
5769 * Checks if there are any pending guest interrupts to be delivered and injects
5770 * them into the VM by updating the VMCS.
5771 *
5772 * @returns VBox status code (informational status codes included).
5773 * @param pVM Pointer to the VM.
5774 * @param pVCpu Pointer to the VMCPU.
5775 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5776 * out-of-sync. Make sure to update the required fields
5777 * before using them.
5778 *
5779 * @remarks Must be called after hmR0VmxLoadGuestIntrState().
5780 */
5781static int hmR0VmxInjectPendingInterrupt(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5782{
5783 /* First inject any pending HM interrupts. */
5784 if (pVCpu->hm.s.Event.fPending)
5785 {
5786 int rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, 0 /* cbInstr */,
5787 pVCpu->hm.s.Event.u32ErrCode);
5788 AssertRCReturn(rc, rc);
5789 pVCpu->hm.s.Event.fPending = false;
5790 return rc;
5791 }
5792
5793 /** @todo SMI. SMIs take priority over NMIs. */
5794
5795    /* NMI. NMIs take priority over regular interrupts. */
5796 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5797 {
5798 /* Construct an NMI interrupt and inject it into the VMCS. */
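        /* The NMI vector is 2 (X86_XCPT_NMI); the NMI type in the interruption-info field makes VT-x deliver it as an NMI rather than as a hardware exception. */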
5799 RTGCUINTPTR uIntrInfo;
5800 uIntrInfo = X86_XCPT_NMI;
5801 uIntrInfo |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5802 uIntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5803 int rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx, uIntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */);
5804 AssertRCReturn(rc, rc);
5805 return rc;
5806 }
5807
5808    /* We need the guest's RFLAGS from this point on; make sure it is updated. */
5809 int rc = hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
5810 AssertRCReturn(rc, rc);
5811
5812 /* If there isn't any active trap, check if we have pending interrupts and convert them to TRPM traps and deliver them. */
5813 if (!TRPMHasTrap(pVCpu))
5814 {
5815 /* Check if there are guest external interrupts (PIC/APIC) pending. */
5816 if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
5817 {
5818 /*
5819             * If the guest can receive interrupts now (interrupts enabled and no interrupt inhibition is active), convert
5820 * the PDM interrupt into a TRPM event and inject it.
5821 */
5822 if ( (pMixedCtx->eflags.u32 & X86_EFL_IF)
5823 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
5824 {
5825 uint8_t u8Interrupt = 0;
5826 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5827 if (RT_SUCCESS(rc))
5828 {
5829 /* Convert pending interrupt from PIC/APIC into TRPM and handle it below. */
5830 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
5831 AssertRCReturn(rc, rc);
5832 }
5833 else
5834 {
5835 /** @todo Does this actually happen? If not turn it into an assertion. */
5836 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
5837 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
5838 }
5839 }
5840 else if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
5841 {
5842 /* Instruct VT-x to cause an interrupt-window exit as soon as the guest is ready to receive interrupts again. */
5843 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
5844 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
5845 AssertRCReturn(rc, rc);
5846 }
5847 /* else we will deliver interrupts whenever the guest exits next and it's in a state to receive interrupts. */
5848 }
5849 }
5850
5851    /* If interrupts can be delivered, inject the pending trap into the VM. */
5852 if ( (pMixedCtx->eflags.u32 & X86_EFL_IF)
5853 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5854 && TRPMHasTrap(pVCpu))
5855 {
5856 rc = hmR0VmxInjectTRPMTrap(pVM, pVCpu, pMixedCtx);
5857 AssertRCReturn(rc, rc);
5858 }
5859 return rc;
5860}
5861
5862/**
5863 * Injects an invalid-opcode (#UD) exception into the VM.
5864 *
5865 * @returns VBox status code (informational status code included).
5866 * @param pVM Pointer to the VM.
5867 * @param pVCpu Pointer to the VMCPU.
5868 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5869 * out-of-sync. Make sure to update the required fields
5870 * before using them.
5871 */
5872DECLINLINE(int) hmR0VmxInjectXcptUD(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5873{
5874 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntrInfo. */
5875 uint32_t u32IntrInfo = X86_XCPT_UD | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5876 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
5877 return hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */);
5878}
5879
5880
5881/**
5882 * Injects a double-fault (#DF) exception into the VM.
5883 *
5884 * @returns VBox status code (informational status code included).
5885 * @param pVM Pointer to the VM.
5886 * @param pVCpu Pointer to the VMCPU.
5887 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5888 * out-of-sync. Make sure to update the required fields
5889 * before using them.
5890 */
5891DECLINLINE(int) hmR0VmxInjectXcptDF(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5892{
5893 /* Inject the double-fault. */
5894 uint32_t u32IntrInfo = X86_XCPT_DF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5895 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5896 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5897 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
5898 return hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */);
5899}
5900
5901
5902/**
5903 * Injects a debug (#DB) exception into the VM.
5904 *
5905 * @returns VBox status code (informational status code included).
5906 * @param pVM Pointer to the VM.
5907 * @param pVCpu Pointer to the VMCPU.
5908 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5909 * out-of-sync. Make sure to update the required fields
5910 * before using them.
5911 */
5912DECLINLINE(int) hmR0VmxInjectXcptDB(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5913{
5914 /* Inject the debug-exception. */
5915 uint32_t u32IntrInfo = X86_XCPT_DB | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5916 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5917 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
5918 return hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */);
5919}
5920
5921
5922/**
5923 * Injects an overflow (#OF) exception into the VM.
5924 *
5925 * @returns VBox status code (informational status code included).
5926 * @param pVM Pointer to the VM.
5927 * @param pVCpu Pointer to the VMCPU.
5928 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5929 * out-of-sync. Make sure to update the required fields
5930 * before using them.
5931 * @param cbInstr The instruction length in bytes (so that the return RIP, i.e. the
5932 * next instruction, is what gets pushed on the guest stack).
5933 */
5934DECLINLINE(int) hmR0VmxInjectXcptOF(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
5935{
5936 /* Inject the overflow exception. */
5937 uint32_t u32IntrInfo = X86_XCPT_OF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5938 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5939 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
5940 return hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx, u32IntrInfo, cbInstr, 0 /* u32ErrCode */);
5941}
5942
5943
5944/**
5945 * Injects a general-protection (#GP) fault into the VM.
5946 *
5947 * @returns VBox status code (informational status code included).
5948 * @param pVM Pointer to the VM.
5949 * @param pVCpu Pointer to the VMCPU.
5950 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5951 * out-of-sync. Make sure to update the required fields
5952 * before using them.
5953 * @param u32ErrorCode The error code associated with the #GP.
5954 */
5955DECLINLINE(int) hmR0VmxInjectXcptGP(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode)
5956{
5957 /* Inject the general-protection fault. */
5958 uint32_t u32IntrInfo = X86_XCPT_GP | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5959 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5960 if (fErrorCodeValid)
5961 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5962 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
5963 return hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, u32ErrorCode);
5964}
5965
5966
5967/**
5968 * Injects a software interrupt (INTn) into the VM.
5969 *
5970 * @returns VBox status code (informational status code included).
5971 * @param pVM Pointer to the VM.
5972 * @param pVCpu Pointer to the VMCPU.
5973 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5974 * out-of-sync. Make sure to update the required fields
5975 * before using them.
5976 * @param uVector The software interrupt vector number.
5977 * @param cbInstr The instruction length in bytes (so that the return RIP, i.e. the
5978 * next instruction, is what gets pushed on the guest stack).
5979 */
5980DECLINLINE(int) hmR0VmxInjectIntN(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
5981{
5982 /* Inject the INTn. */
5983 uint32_t u32IntrInfo = uVector | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5984 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5985 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
5986 return hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx, u32IntrInfo, cbInstr, 0 /* u32ErrCode */);
5987}
5988
5989
5990/**
5991 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
5992 * stack.
5993 *
5994 * @returns VBox status code (informational status codes included).
5995 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
5996 * @param pVM Pointer to the VM.
5997 * @param pMixedCtx Pointer to the guest-CPU context.
5998 * @param uValue The value to push to the guest stack.
5999 */
6000DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
6001{
6002 /*
6003 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
6004 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
6005 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
6006 */
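    /* With SP == 1 a 2-byte push would straddle the 64K segment wrap-around; per the spec references above real hardware shuts down in that case, so we report a triple fault. */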
6007 if (pMixedCtx->sp == 1)
6008 return VINF_EM_RESET;
6009 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
6010 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
6011 AssertRCReturn(rc, rc);
6012 return rc;
6013}
6014
6015
6016/**
6017 * Injects an event into the guest upon VM-entry by updating the relevant fields
6018 * in the VM-entry area in the VMCS.
6019 *
6020 * @returns VBox status code (informational status codes included).
6021 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
6022 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
6023 *
6024 * @param pVM Pointer to the VM.
6025 * @param pVCpu Pointer to the VMCPU.
6026 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6027 * out-of-sync. Make sure to update the required fields
6028 * before using them.
6029 * @param u64IntrInfo The VM-entry interruption-information field.
6030 * @param cbInstr The VM-entry instruction length in bytes (for software
6031 * interrupts, exceptions and privileged software
6032 * exceptions).
6033 * @param u32ErrCode The VM-entry exception error code.
6034 *
6035 * @remarks No-long-jump zone!!!
6036 */
6037static int hmR0VmxInjectEventVmcs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
6038 uint32_t u32ErrCode)
6039{
6040 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
6041 AssertMsg(u64IntrInfo >> 32 == 0, ("%#RX64\n", u64IntrInfo));
6042 uint32_t u32IntrInfo = (uint32_t)u64IntrInfo;
6043
6044 /* We require CR0 to check if the guest is in real-mode. */
6045 int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
6046 AssertRCReturn(rc, rc);
6047
6048 const uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntrInfo);
6049 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
6050
6051 /*
6052 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
6053 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
6054 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
6055 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
6056 */
6057 if (CPUMIsGuestInRealModeEx(pMixedCtx))
6058 {
6059 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
6060 {
6061 Assert(PDMVmmDevHeapIsEnabled(pVM));
6062 Assert(pVM->hm.s.vmx.pRealModeTSS);
6063
6064 /* Save the required guest state bits from the VMCS. */
6065 rc = hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
6066 rc |= hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
6067 AssertRCReturn(rc, rc);
6068
6069 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
6070 const size_t cbIdtEntry = 4;
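            /* Each IVT entry is 4 bytes: a 16-bit handler offset at +0 followed by a 16-bit code segment selector at +2. */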
6071 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
6072 {
6073 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
6074 if (uVector == X86_XCPT_DF)
6075 return VINF_EM_RESET;
6076 else if (uVector == X86_XCPT_GP)
6077 {
6078 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
6079 return hmR0VmxInjectXcptDF(pVM, pVCpu, pMixedCtx);
6080 }
6081
6082 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
6083 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
6084 return hmR0VmxInjectXcptGP(pVM, pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */);
6085 }
6086
6087 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT 3 or INTO) */
6088 uint16_t uGuestIp = pMixedCtx->ip;
6089 if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
6090 {
6091 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
6092                 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
6093 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6094 }
6095 else if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
6096 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6097
6098 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
6099 uint16_t offIdtEntry = 0;
6100 RTSEL selIdtEntry = 0;
6101 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
6102 rc = PGMPhysSimpleReadGCPhys(pVM, &offIdtEntry, GCPhysIdtEntry, sizeof(offIdtEntry));
6103 rc |= PGMPhysSimpleReadGCPhys(pVM, &selIdtEntry, GCPhysIdtEntry + 2, sizeof(selIdtEntry));
6104 AssertRCReturn(rc, rc);
6105
6106 /* Construct the stack frame for the interrupt/exception handler. */
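            /* Push order mirrors a real-mode interrupt: FLAGS, then CS, then IP, so the handler's IRET unwinds it correctly. */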
6107 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
6108 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
6109 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
6110 AssertRCReturn(rc, rc);
6111
6112 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
6113 if (rc == VINF_SUCCESS)
6114 {
6115 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
6116 pMixedCtx->rip = offIdtEntry;
6117 pMixedCtx->cs.Sel = selIdtEntry;
6118 pMixedCtx->cs.u64Base = selIdtEntry << cbIdtEntry;
6119 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS
6120 | HM_CHANGED_GUEST_RIP
6121 | HM_CHANGED_GUEST_RFLAGS
6122 | HM_CHANGED_GUEST_RSP;
6123 }
6124 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6125 return rc;
6126 }
6127 else
6128 {
6129 /*
6130             * When unrestricted guest execution is enabled and the guest is in real mode, we must not set the deliver-error-code bit.
6131 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6132 */
6133 u32IntrInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6134 }
6135 }
6136
6137 /* Add the valid bit, maybe the caller was lazy. */
6138 u32IntrInfo |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
6139
6140 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntrInfo)); /* Bit 12 MBZ. */
6141 Assert(!(u32IntrInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
6142 Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
6143
6144 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo);
6145 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo))
6146 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
6147 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
6148 AssertRCReturn(rc, rc);
6149 return rc;
6150}
6151
6152
6153/**
6154 * Enters the VT-x session.
6155 *
6156 * @returns VBox status code.
6157 * @param pVM Pointer to the VM.
6158 * @param pVCpu Pointer to the VMCPU.
6159 * @param pCpu Pointer to the CPU info struct.
6160 */
6161VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
6162{
6163 AssertPtr(pVM);
6164 AssertPtr(pVCpu);
6165 Assert(pVM->hm.s.vmx.fSupported);
6166 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6167 NOREF(pCpu);
6168
6169 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6170
6171 /* Make sure we're in VMX root mode. */
6172 RTCCUINTREG u32HostCR4 = ASMGetCR4();
6173 if (!(u32HostCR4 & X86_CR4_VMXE))
6174 {
6175 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
6176 return VERR_VMX_X86_CR4_VMXE_CLEARED;
6177 }
6178
6179 /* Load the active VMCS as the current one. */
6180 int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6181 if (RT_FAILURE(rc))
6182 return rc;
6183
6184    /** @todo This will change with preemption hooks, where we can VMRESUME as long
6185     * as we're not preempted. */
6186 pVCpu->hm.s.fResumeVM = false;
6187 return VINF_SUCCESS;
6188}
6189
6190
6191/**
6192 * Leaves the VT-x session.
6193 *
6194 * @returns VBox status code.
6195 * @param pVM Pointer to the VM.
6196 * @param pVCpu Pointer to the VMCPU.
6197 * @param pCtx Pointer to the guest-CPU context.
6198 */
6199VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6200{
6201 AssertPtr(pVCpu);
6202 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6203 NOREF(pVM);
6204 NOREF(pCtx);
6205
6206    /** @todo This will change with preemption hooks, where we will only VMCLEAR when
6207 * we are actually going to be preempted, not all the time like we
6208 * currently do. */
6209 /*
6210 * Sync the current VMCS (writes back internal data back into the VMCS region in memory)
6211 * and mark the VMCS launch-state as "clear".
6212 */
6213 int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6214 return rc;
6215}
6216
6217
6218/**
6219 * Saves the host state in the VMCS host-state.
6220 * Sets up the VM-exit MSR-load area.
6221 *
6222 * The CPU state will be loaded from these fields on every successful VM-exit.
6223 *
6224 * @returns VBox status code.
6225 * @param pVM Pointer to the VM.
6226 * @param pVCpu Pointer to the VMCPU.
6227 *
6228 * @remarks No-long-jump zone!!!
6229 */
6230VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
6231{
6232 AssertPtr(pVM);
6233 AssertPtr(pVCpu);
6234 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6235
6236 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6237
6238 /* Nothing to do if the host-state-changed flag isn't set. This will later be optimized when preemption hooks are in place. */
6239 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
6240 return VINF_SUCCESS;
6241
6242 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
6243 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6244
6245 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
6246 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6247
6248 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
6249 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6250
6251 /* Reset the host-state-changed flag. */
6252 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
6253 return rc;
6254}
6255
6256
6257/**
6258 * Loads the guest state into the VMCS guest-state area. The CPU state will be
6259 * loaded from these fields on every successful VM-entry.
6260 *
6261 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
6262 * Sets up the VM-entry controls.
6263 * Sets up the appropriate VMX non-root function to execute guest code based on
6264 * the guest CPU mode.
6265 *
6266 * @returns VBox status code.
6267 * @param pVM Pointer to the VM.
6268 * @param pVCpu Pointer to the VMCPU.
6269 * @param pCtx Pointer to the guest-CPU context.
6270 *
6271 * @remarks No-long-jump zone!!!
6272 */
6273VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6274{
6275 AssertPtr(pVM);
6276 AssertPtr(pVCpu);
6277 AssertPtr(pCtx);
6278 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6279
6280 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6281
6282 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
6283
6284 /* Determine real-on-v86 mode. */
6285 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
6286 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
6287 && CPUMIsGuestInRealModeEx(pCtx))
6288 {
6289 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
6290 }
6291
6292 int rc = hmR0VmxLoadGuestEntryCtls(pVM, pVCpu, pCtx);
6293 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6294
6295 rc = hmR0VmxLoadGuestExitCtls(pVM, pVCpu, pCtx);
6296 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6297
6298 rc = hmR0VmxLoadGuestActivityState(pVM, pVCpu, pCtx);
6299 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6300
6301 rc = hmR0VmxLoadGuestControlRegs(pVM, pVCpu, pCtx);
6302 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6303
6304 rc = hmR0VmxLoadGuestSegmentRegs(pVM, pVCpu, pCtx);
6305 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6306
6307 rc = hmR0VmxLoadGuestDebugRegs(pVM, pVCpu, pCtx);
6308 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6309
6310 rc = hmR0VmxLoadGuestMsrs(pVM, pVCpu, pCtx);
6311 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6312
6313 rc = hmR0VmxLoadGuestApicState(pVM, pVCpu, pCtx);
6314 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6315
6316 rc = hmR0VmxLoadGuestGprs(pVM, pVCpu, pCtx);
6317 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestGprs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6318
6319 rc = hmR0VmxSetupVMRunHandler(pVM, pVCpu, pCtx);
6320 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6321
6322 AssertMsg(!pVCpu->hm.s.fContextUseFlags,
6323 ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
6324 pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
6325
6326 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
6327 return rc;
6328}
6329
6330
6331/**
6332 * Does the preparations before executing guest code in VT-x.
6333 *
6334 * This may cause longjmps to ring-3 and may even result in rescheduling to the
6335 * recompiler. We must be cautious about committing guest-state information
6336 * into the VMCS here on the assumption that we will definitely execute the
6337 * guest in VT-x. If we fall back to the recompiler after updating the VMCS and
6338 * clearing the common state (TRPM/force-flags), we must undo those changes so
6339 * that the recompiler can (and should) use them when it resumes guest
6340 * execution. Otherwise such operations must be done when we can no longer
6341 * exit to ring-3.
6342 *
6343 * @returns VBox status code (informational status codes included).
6344 * @retval VINF_SUCCESS if we can proceed with running the guest.
6345 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a double-fault
6346 * into the guest.
6347 * @retval VINF_* scheduling changes, we have to go back to ring-3.
6348 *
6349 * @param pVM Pointer to the VM.
6350 * @param pVCpu Pointer to the VMCPU.
6351 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6352 * out-of-sync. Make sure to update the required fields
6353 * before using them.
6354 * @param pVmxTransient Pointer to the VMX transient structure.
6355 *
6356 * @remarks Called with preemption disabled.
6357 */
6358DECLINLINE(int) hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6359{
6360 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6361
6362#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
6363 PGMRZDynMapFlushAutoSet(pVCpu);
6364#endif
6365
6366 /* Check force flag actions that might require us to go back to ring-3. */
6367 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
6368 if (rc != VINF_SUCCESS)
6369 return rc;
6370
6371 /* Setup the Virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
6372 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
6373 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
6374 {
6375 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
6376 RTGCPHYS GCPhysApicBase;
6377 GCPhysApicBase = pMixedCtx->msrApicBase;
6378 GCPhysApicBase &= PAGE_BASE_GC_MASK;
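            /* Mask off the low 12 bits so we map the 4K-aligned APIC MMIO page. */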
6379
6380 /* Unalias any existing mapping. */
6381 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
6382 AssertRCReturn(rc, rc);
6383
6384 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
6385 Log2(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
6386 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
6387 AssertRCReturn(rc, rc);
6388
6389 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
6390 }
6391
6392#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6393 /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
6394 pVmxTransient->uEFlags = ASMIntDisableFlags();
6395 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
6396 {
6397 ASMSetFlags(pVmxTransient->uEFlags);
6398 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
6399 /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
6400 return VINF_EM_RAW_INTERRUPT;
6401 }
6402 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6403#endif
6404
6405 /*
6406 * This clears force-flags, TRPM traps & pending HM events. We cannot safely restore the state if we exit to ring-3
6407 * (before running guest code) after calling this function (e.g. how do we reverse the effects of calling PDMGetInterrupt()?)
6408 * This is why this is done after all possible exits-to-ring-3 paths in this code.
6409 */
6410 hmR0VmxLoadGuestIntrState(pVM, pVCpu, pMixedCtx);
6411 rc = hmR0VmxInjectPendingInterrupt(pVM, pVCpu, pMixedCtx);
6412 AssertRCReturn(rc, rc);
6413 return rc;
6414}
6415
6416
6417/**
6418 * Prepares to run guest code in VT-x and we've committed to doing so. This
6419 * means there is no backing out to ring-3 or anywhere else at this
6420 * point.
6421 *
6422 * @param pVM Pointer to the VM.
6423 * @param pVCpu Pointer to the VMCPU.
6424 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6425 * out-of-sync. Make sure to update the required fields
6426 * before using them.
6427 * @param pVmxTransient Pointer to the VMX transient structure.
6428 *
6429 * @remarks Called with preemption disabled.
6430 * @remarks No-long-jump zone!!!
6431 */
6432DECLINLINE(void) hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6433{
6434 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6435 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6436
6437#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6438    /** @todo I don't see the point of this; VMMR0EntryFast() already disables interrupts for the entire period. */
6439 pVmxTransient->uEFlags = ASMIntDisableFlags();
6440 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6441#endif
6442
6443 /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
6444 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
6445 Log(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
6446 int rc = VINF_SUCCESS;
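    /* Fast path: when only RIP has changed (typically the common case in the inner run loop), reload just RIP instead of the full guest state. */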
6447 if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
6448 {
6449 rc = hmR0VmxLoadGuestRip(pVM, pVCpu, pMixedCtx);
6450 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
6451 }
6452 else if (pVCpu->hm.s.fContextUseFlags)
6453 {
6454 rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
6455 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
6456 }
6457 AssertRC(rc);
6458 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
6459
6460 /* Cache the TPR-shadow for checking on every VM-exit if it might have changed. */
6461 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
6462 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
6463
6464 Assert(pVM->hm.s.vmx.pfnFlushTaggedTlb);
6465 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
6466 pVM->hm.s.vmx.pfnFlushTaggedTlb(pVM, pVCpu); /* Flush the TLB of guest entries as necessary. */
6467
6468 /* Setup TSC-offsetting or intercept RDTSC(P)s and update the preemption timer. */
6469 if (pVmxTransient->fUpdateTscOffsettingAndPreemptTimer)
6470 {
6471 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu, pMixedCtx);
6472 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
6473 }
6474
6475 /*
6476     * TPR patching (only active for 32-bit guests on 64-bit capable CPUs) when the CPU does not support the
6477     * virtualize-APIC-accesses feature (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).
6478 */
6479 if (pVM->hm.s.fTPRPatchingActive)
6480 {
6481 Assert(!CPUMIsGuestInLongMode(pVCpu));
6482
6483 /* Need guest's LSTAR MSR (which is part of the auto load/store MSRs in the VMCS), ensure we have the updated one. */
6484 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
6485 AssertRC(rc);
6486
6487        /* The patch code uses LSTAR as it's not used by a guest in 32-bit mode (i.e. SYSCALL is 64-bit only). */
6488 pVmxTransient->u64LStarMsr = ASMRdMsr(MSR_K8_LSTAR);
6489 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR); /* pMixedCtx->msrLSTAR contains the guest's TPR,
6490 see hmR0VmxLoadGuestApicState(). */
6491 }
6492
6493 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
6494 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
6495 to start executing. */
6496}
6497
6498
6499/**
6500 * Performs some essential restoration of state after running guest code in
6501 * VT-x.
6502 *
6503 * @param pVM Pointer to the VM.
6504 * @param pVCpu Pointer to the VMCPU.
6505 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6506 * out-of-sync. Make sure to update the required fields
6507 * before using them.
6508 * @param pVmxTransient Pointer to the VMX transient structure.
6509 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
6510 *
6511 * @remarks Called with interrupts disabled.
6512 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
6513 * unconditionally when it is safe to do so.
6514 */
6515DECLINLINE(void) hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
6516{
6517 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6518 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
6519
6520 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
6521 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
6522 pVCpu->hm.s.vmx.fUpdatedGuestState = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
6523 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
6524 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
6525
6526 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
6527 {
6528 /** @todo Find a way to fix hardcoding a guestimate. */
6529 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
6530 + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
6531 }
6532
6533 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
6534 Assert(!(ASMGetFlags() & X86_EFL_IF));
6535 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
6536
6537 /* Restore the effects of TPR patching if any. */
6538 if (pVM->hm.s.fTPRPatchingActive)
6539 {
6540 int rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
6541 AssertRC(rc);
6542 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); /* MSR_K8_LSTAR contains the guest TPR. */
6543 ASMWrMsr(MSR_K8_LSTAR, pVmxTransient->u64LStarMsr);
6544 }
6545
6546 ASMSetFlags(pVmxTransient->uEFlags); /* Enable interrupts. */
6547 pVCpu->hm.s.fResumeVM = true; /* Use VMRESUME instead of VMLAUNCH in the next run. */
6548
6549 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
6550 uint32_t uExitReason;
6551 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
6552 rc |= hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
6553 AssertRC(rc);
6554 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
6555 pVmxTransient->fVMEntryFailed = !!VMX_ENTRY_INTERRUPTION_INFO_VALID(pVmxTransient->uEntryIntrInfo);
6556
6557 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
6558 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
6559
6560 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
6561 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
6562 {
6563 Log(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
6564 return;
6565 }
6566
6567 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
6568 {
6569 /* Update the guest interruptibility-state from the VMCS. */
6570 hmR0VmxSaveGuestIntrState(pVM, pVCpu, pMixedCtx);
6571
6572 /*
6573 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
6574         * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is also why
6575         * we do it outside of hmR0VmxSaveGuestState(), which must never cause longjmps.
6576 */
6577 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
6578 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
6579 {
6580 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
6581 AssertRC(rc);
6582 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
6583 }
6584 }
6585}
6586
6587
6588/**
6589 * Runs the guest code using VT-x.
6590 *
6591 * @returns VBox status code.
6592 * @param pVM Pointer to the VM.
6593 * @param pVCpu Pointer to the VMCPU.
6594 * @param pCtx Pointer to the guest-CPU context.
6595 *
6596 * @remarks Called with preemption disabled.
6597 */
6598VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6599{
6600 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6601 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6602
6603 VMXTRANSIENT VmxTransient;
6604 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
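    /* Force TSC-offsetting (and preemption-timer) setup on the first iteration; VM-exit handlers set this again when needed. */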
6605 int rc = VERR_INTERNAL_ERROR_5;
6606 unsigned cLoops = 0;
6607
6608 for (;; cLoops++)
6609 {
6610 Assert(!HMR0SuspendPending());
6611 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
6612 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
6613 (unsigned)RTMpCpuId(), cLoops));
6614
6615 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
6616 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
6617 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
6618 if (rc != VINF_SUCCESS)
6619 break;
6620
6621 /*
6622 * No longjmps to ring-3 from this point on!!!
6623         * Asserts() will still longjmp to ring-3 (but won't return), which is intentional and better than a kernel panic.
6624 * This also disables flushing of the R0-logger instance (if any).
6625 */
6626 VMMRZCallRing3Disable(pVCpu);
6627 VMMRZCallRing3RemoveNotification(pVCpu);
6628 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
6629
6630 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
6631 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
6632
6633 /*
6634 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
6635 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
6636 */
6637 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
6638 if (RT_UNLIKELY(rc != VINF_SUCCESS)) /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
6639 {
6640 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
6641 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
6642 return rc;
6643 }
6644
6645 /* Handle the VM-exit. */
6646 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
6647 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
6648 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
6649 rc = (*s_apfnVMExitHandlers[VmxTransient.uExitReason])(pVM, pVCpu, pCtx, &VmxTransient);
6650 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
6651 if (rc != VINF_SUCCESS)
6652 break;
6653 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
6654 {
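            /* Don't spin in ring-0 forever; after cMaxResumeLoops iterations return to ring-3 so pending work there can be processed. */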
6655 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
6656 rc = VINF_EM_RAW_INTERRUPT;
6657 break;
6658 }
6659 }
6660
6661 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
6662 if (rc == VERR_EM_INTERPRETER)
6663 rc = VINF_EM_RAW_EMULATE_INSTR;
6664 hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
6665 return rc;
6666}
6667
6668#if 0
6669DECLINLINE(int) hmR0VmxHandleExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, unsigned rcReason)
6670{
6671 int rc;
6672 switch (rcReason)
6673 {
6674 case VMX_EXIT_EPT_MISCONFIG: rc = hmR0VmxExitEptMisconfig(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6675 case VMX_EXIT_EPT_VIOLATION: rc = hmR0VmxExitEptViolation(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6676 case VMX_EXIT_IO_INSTR: rc = hmR0VmxExitIoInstr(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6677 case VMX_EXIT_CPUID: rc = hmR0VmxExitCpuid(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6678 case VMX_EXIT_RDTSC: rc = hmR0VmxExitRdtsc(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6679 case VMX_EXIT_RDTSCP: rc = hmR0VmxExitRdtscp(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6680 case VMX_EXIT_APIC_ACCESS: rc = hmR0VmxExitApicAccess(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6681 case VMX_EXIT_XCPT_NMI: rc = hmR0VmxExitXcptNmi(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6682 case VMX_EXIT_MOV_CRX: rc = hmR0VmxExitMovCRx(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6683 case VMX_EXIT_EXT_INT: rc = hmR0VmxExitExtInt(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6684 case VMX_EXIT_INT_WINDOW: rc = hmR0VmxExitIntWindow(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6685 case VMX_EXIT_MWAIT: rc = hmR0VmxExitMwait(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6686 case VMX_EXIT_MONITOR: rc = hmR0VmxExitMonitor(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6687 case VMX_EXIT_TASK_SWITCH: rc = hmR0VmxExitTaskSwitch(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6688 case VMX_EXIT_PREEMPTION_TIMER: rc = hmR0VmxExitPreemptionTimer(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6689 case VMX_EXIT_RDMSR: rc = hmR0VmxExitRdmsr(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6690 case VMX_EXIT_WRMSR: rc = hmR0VmxExitWrmsr(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6691 case VMX_EXIT_MOV_DRX: rc = hmR0VmxExitMovDRx(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6692 case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6693 case VMX_EXIT_HLT: rc = hmR0VmxExitHlt(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6694 case VMX_EXIT_INVD: rc = hmR0VmxExitInvd(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6695 case VMX_EXIT_INVLPG: rc = hmR0VmxExitInvlpg(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6696 case VMX_EXIT_RSM: rc = hmR0VmxExitRsm(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6697 case VMX_EXIT_MTF: rc = hmR0VmxExitMtf(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6698 case VMX_EXIT_PAUSE: rc = hmR0VmxExitPause(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6699 case VMX_EXIT_XDTR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6700 case VMX_EXIT_TR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6701 case VMX_EXIT_WBINVD: rc = hmR0VmxExitWbinvd(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6702 case VMX_EXIT_XSETBV: rc = hmR0VmxExitXsetbv(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6703 case VMX_EXIT_RDRAND: rc = hmR0VmxExitRdrand(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6704 case VMX_EXIT_INVPCID: rc = hmR0VmxExitInvpcid(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6705 case VMX_EXIT_GETSEC: rc = hmR0VmxExitGetsec(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6706 case VMX_EXIT_RDPMC: rc = hmR0VmxExitRdpmc(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6707
6708 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6709 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6710 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6711 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6712 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6713 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6714 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6715 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6716 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6717
6718 case VMX_EXIT_VMCALL:
6719 case VMX_EXIT_VMCLEAR:
6720 case VMX_EXIT_VMLAUNCH:
6721 case VMX_EXIT_VMPTRLD:
6722 case VMX_EXIT_VMPTRST:
6723 case VMX_EXIT_VMREAD:
6724 case VMX_EXIT_VMRESUME:
6725 case VMX_EXIT_VMWRITE:
6726 case VMX_EXIT_VMXOFF:
6727 case VMX_EXIT_VMXON:
6728 case VMX_EXIT_INVEPT:
6729 case VMX_EXIT_INVVPID:
6730 case VMX_EXIT_VMFUNC:
6731 rc = hmR0VmxExitInjectXcptUD(pVM, pVCpu, pMixedCtx, pVmxTransient);
6732 break;
6733 default:
6734 rc = hmR0VmxExitErrUndefined(pVM, pVCpu, pMixedCtx, pVmxTransient);
6735 break;
6736 }
6737 return rc;
6738}
6739#endif
6740
6741#ifdef DEBUG
6742 /* Is there some generic IPRT define for this that is not in Runtime/internal/\*? */
6743# define VMX_ASSERT_PREEMPT_CPUID_VAR() \
6744 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6745# define VMX_ASSERT_PREEMPT_CPUID() \
6746 do \
6747 { \
6748 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6749 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6750 } while (0)
6751
6752# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() \
6753 do { \
6754 AssertPtr(pVM); \
6755 AssertPtr(pVCpu); \
6756 AssertPtr(pMixedCtx); \
6757 AssertPtr(pVmxTransient); \
6758 Assert(pVmxTransient->fVMEntryFailed == false); \
6759 Assert(ASMIntAreEnabled()); \
6760 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
6761 VMX_ASSERT_PREEMPT_CPUID_VAR(); \
6762 LogFunc(("\n")); \
6763 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
6764 if (VMMR0IsLogFlushDisabled(pVCpu)) \
6765 VMX_ASSERT_PREEMPT_CPUID(); \
6766 } while (0)
6767# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
6768 do { \
6769 LogFunc(("\n")); \
6770 } while(0)
6771#else /* Release builds */
6772# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() do { } while(0)
6773# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0)
6774#endif
6775
6776
6777/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6778/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6779/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6780/**
6781 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
6782 */
6783static DECLCALLBACK(int) hmR0VmxExitExtInt(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6784{
6785 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6786 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
6787 return VINF_SUCCESS;
6788}
6789
6790
6791/**
6792 * VM-exit handler for exceptions and NMIs (VMX_EXIT_XCPT_NMI).
6793 */
6794static DECLCALLBACK(int) hmR0VmxExitXcptNmi(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6795{
6796 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6797 int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
6798 AssertRCReturn(rc, rc);
6799
6800 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
6801 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT)
6802 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6803
6804 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
6805 return VINF_EM_RAW_INTERRUPT;
6806
6807 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
6808 rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
6809 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
6810 return VINF_SUCCESS;
6811 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
6812 return rc;
6813
6814 uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
6815 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
6816 switch (uIntrType)
6817 {
6818 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
6819 Assert(uVector == X86_XCPT_DB || uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
6820 /* no break */
6821 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT: /* Hardware exception. */
6822 {
6823 switch (uVector)
6824 {
6825 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6826 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6827 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6828 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6829 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6830 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6831#ifdef VBOX_ALWAYS_TRAP_ALL_EXCEPTIONS
6832 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
6833 rc = hmR0VmxExitXcptGeneric(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6834 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
6835 rc = hmR0VmxExitXcptGeneric(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6836 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
6837 rc = hmR0VmxExitXcptGeneric(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6838 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
6839 rc = hmR0VmxExitXcptGeneric(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6840 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
6841 rc = hmR0VmxExitXcptGeneric(pVM, pVCpu, pMixedCtx, pVmxTransient); break;
6842#endif
6843 default:
6844 {
6845 rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
6846 AssertRCReturn(rc, rc);
6847
6848 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
6849 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6850 {
6851 Assert(pVM->hm.s.vmx.pRealModeTSS);
6852 Assert(PDMVmmDevHeapIsEnabled(pVM));
6853 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
6854 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
6855 AssertRCReturn(rc, rc);
6856 rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
6857 VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
6858 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode);
6859 AssertRCReturn(rc, rc);
6860 }
6861 else
6862 {
6863 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
6864 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
6865 }
6866 break;
6867 }
6868 }
6869 break;
6870 }
6871
6872 case VMX_EXIT_INTERRUPTION_INFO_TYPE_DB_XCPT:
6873 default:
6874 {
6875 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE;
6876 AssertMsgFailed(("Unexpected interruption code %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo)));
6877 break;
6878 }
6879 }
6880 return rc;
6881}
6882
6883
6884/**
6885 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
6886 */
6887static DECLCALLBACK(int) hmR0VmxExitIntWindow(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6888{
6889 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6890
6891 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
6892 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
6893 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
6894 AssertRCReturn(rc, rc);
6895
6896 /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectPendingInterrupt() and resume guest execution. */
6897 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
6898 return VINF_SUCCESS;
6899}
6900
6901
6902/**
6903 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
6904 */
6905static DECLCALLBACK(int) hmR0VmxExitNmiWindow(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6906{
6907 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6908 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
6909 return VERR_VMX_UNEXPECTED_EXIT_CODE;
6910}
6911
6912
6913/**
6914 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
6915 */
6916static DECLCALLBACK(int) hmR0VmxExitWbinvd(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6917{
6918 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6919 int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
6920 rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
6921 AssertRCReturn(rc, rc);
6922
6923 pMixedCtx->rip += pVmxTransient->cbInstr;
6924 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
6925
6926 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
6927 return VINF_SUCCESS;
6928}
6929
6930
6931/**
6932 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
6933 */
6934static DECLCALLBACK(int) hmR0VmxExitInvd(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6935{
6936 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6937 int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
6938 rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
6939 AssertRCReturn(rc, rc);
6940
6941 pMixedCtx->rip += pVmxTransient->cbInstr;
6942 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
6943
6944 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
6945 return VINF_SUCCESS;
6946}
6947
6948
6949/**
6950 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
6951 */
6952static DECLCALLBACK(int) hmR0VmxExitCpuid(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6953{
6954 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6955 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
6956 if (RT_LIKELY(rc == VINF_SUCCESS))
6957 {
6958 rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
6959 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
6960 AssertRCReturn(rc, rc);
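        /* CPUID is always a 2-byte instruction (opcode 0F A2). */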
6961 Assert(pVmxTransient->cbInstr == 2);
6962
6963 Log(("hmR0VmxExitCpuid: RIP=%#RX64\n", pMixedCtx->rip));
6964 pMixedCtx->rip += pVmxTransient->cbInstr;
6965 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
6966 }
6967 else
6968 {
6969 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
6970 rc = VERR_EM_INTERPRETER;
6971 }
6972 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
6973 return rc;
6974}
6975
6976
6977/**
6978 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
6979 */
6980static DECLCALLBACK(int) hmR0VmxExitGetsec(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6981{
6982 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6983 int rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);
6984 AssertRCReturn(rc, rc);
6985
6986 if (pMixedCtx->cr4 & X86_CR4_SMXE)
6987 return VINF_EM_RAW_EMULATE_INSTR;
6988
6989 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
6990 return VERR_VMX_UNEXPECTED_EXIT_CODE;
6991}
6992
6993
6994/**
6995 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
6996 */
6997static DECLCALLBACK(int) hmR0VmxExitRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6998{
6999 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7000 int rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7001 AssertRCReturn(rc, rc);
7002
7003 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7004 if (RT_LIKELY(rc == VINF_SUCCESS))
7005 {
7006 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7007 rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7008 AssertRCReturn(rc, rc);
7009 Assert(pVmxTransient->cbInstr == 2);
7010
7011 pMixedCtx->rip += pVmxTransient->cbInstr;
7012 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7013
7014 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7015 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
7016 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7017 }
7018 else
7019 {
7020 AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
7021 rc = VERR_EM_INTERPRETER;
7022 }
7023 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7024 return rc;
7025}
7026
7027
7028/**
7029 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7030 */
7031static DECLCALLBACK(int) hmR0VmxExitRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7032{
7033 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7034 int rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7035 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
7036 AssertRCReturn(rc, rc);
7037
7038 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
7039 if (RT_LIKELY(rc == VINF_SUCCESS))
7040 {
7041 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7042 rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7043 AssertRCReturn(rc, rc);
7044 Assert(pVmxTransient->cbInstr == 3);
7045
7046 pMixedCtx->rip += pVmxTransient->cbInstr;
7047 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7048
7049 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7050 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
7051 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7052 }
7053 else
7054 {
7055 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
7056 rc = VERR_EM_INTERPRETER;
7057 }
7058 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7059 return rc;
7060}
7061
7062
7063/**
7064 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7065 */
7066static DECLCALLBACK(int) hmR0VmxExitRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7067{
7068 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7069 int rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7070 rc |= hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
7071 AssertRCReturn(rc, rc);
7072
7073 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7074 if (RT_LIKELY(rc == VINF_SUCCESS))
7075 {
7076 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7077 rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7078 AssertRCReturn(rc, rc);
7079 Assert(pVmxTransient->cbInstr == 2);
7080
7081 pMixedCtx->rip += pVmxTransient->cbInstr;
7082 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7083 }
7084 else
7085 {
7086 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7087 rc = VERR_EM_INTERPRETER;
7088 }
7089 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
7090 return rc;
7091}
7092
7093
7094/**
7095 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7096 */
7097static DECLCALLBACK(int) hmR0VmxExitInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7098{
7099 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7100 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7101 rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
7102 AssertRCReturn(rc, rc);
7103
7104 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
7105 rc = VBOXSTRICTRC_VAL(rc2);
7106 if (RT_LIKELY(rc == VINF_SUCCESS))
7107 {
7108 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7109 rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7110 AssertRCReturn(rc, rc);
7111
7112 pMixedCtx->rip += pVmxTransient->cbInstr;
7113 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7114 }
7115 else
7116 {
7117 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %RGv failed with %Rrc\n",
7118 pVmxTransient->uExitQualification, rc));
7119 rc = VERR_EM_INTERPRETER;
7120 }
7121 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
7122 return rc;
7123}
7124
7125
7126/**
7127 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7128 */
7129static DECLCALLBACK(int) hmR0VmxExitMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7130{
7131 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7132 int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
7133 rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
7134 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
7135 AssertRCReturn(rc, rc);
7136
7137 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7138 if (RT_LIKELY(rc == VINF_SUCCESS))
7139 {
7140 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7141 rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7142 AssertRCReturn(rc, rc);
7143
7144 pMixedCtx->rip += pVmxTransient->cbInstr;
7145 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7146 }
7147 else
7148 {
7149 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
7150 rc = VERR_EM_INTERPRETER;
7151 }
7152 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
7153 return rc;
7154}
7155
7156
7157/**
7158 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7159 */
7160static DECLCALLBACK(int) hmR0VmxExitMwait(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7161{
7162 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7163 int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
7164 rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
7165 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
7166 AssertRCReturn(rc, rc);
7167
7168 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7169 rc = VBOXSTRICTRC_VAL(rc2);
7170 if (RT_LIKELY( rc == VINF_SUCCESS
7171 || rc == VINF_EM_HALT))
7172 {
7173 int rc3 = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7174 rc3 |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7175 AssertRCReturn(rc3, rc3);
7176
7177 pMixedCtx->rip += pVmxTransient->cbInstr;
7178 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7179
7180 if ( rc == VINF_EM_HALT
7181 && EMShouldContinueAfterHalt(pVCpu, pMixedCtx))
7182 {
7183 rc = VINF_SUCCESS;
7184 }
7185 }
7186 else
7187 {
7188 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
7189 rc = VERR_EM_INTERPRETER;
7190 }
7191 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
7192 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
7193 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
7194 return rc;
7195}
7196
7197
7198/**
7199 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
7200 */
7201static DECLCALLBACK(int) hmR0VmxExitRsm(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7202{
7203 /*
7204 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
7205 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
7206 * executing VMCALL in VMX root operation. If we get here something funny is going on.
7207 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
7208 */
7209 AssertMsgFailed(("Unexpected RSM VM-exit. pVM=%p pVCpu=%p pMixedCtx=%p\n", pVM, pVCpu, pMixedCtx));
7210 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7211}
7212
7213
7214/**
7215 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
7216 */
7217static DECLCALLBACK(int) hmR0VmxExitSmi(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7218{
7219 /*
7220 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
7221 * root operation. If we get here, there is something funny going on.
7222 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
7223 */
7224 AssertMsgFailed(("Unexpected SMI VM-exit. pVM=%p pVCpu=%p pMixedCtx=%p\n", pVM, pVCpu, pMixedCtx));
7225 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7226}
7227
7228
7229/**
7230 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
7231 */
7232static DECLCALLBACK(int) hmR0VmxExitIoSmi(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7233{
7234 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
7235 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVM=%p pVCpu=%p pMixedCtx=%p\n", pVM, pVCpu, pMixedCtx));
7236 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7237}
7238
7239
7240/**
7241 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
7242 */
7243static DECLCALLBACK(int) hmR0VmxExitSipi(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7244{
7245 /*
7246 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
7247 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
7248 * See Intel spec. 25.3 "Other Causes of VM-exits".
7249 */
7250 AssertMsgFailed(("Unexpected SIPI VM-exit. pVM=%p pVCpu=%p pMixedCtx=%p\n", pVM, pVCpu, pMixedCtx));
7251 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7252}
7253
7254
7255/**
7256 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
7257 * VM-exit.
7258 */
7259static DECLCALLBACK(int) hmR0VmxExitInitSignal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7260{
7261 /*
7262 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM. See Intel spec. "33.14.1 Default Treatment of
7263 * SMI Delivery" and "29.3 VMX Instructions" for "VMXON". It is -NOT- blocked in VMX non-root operation so we can potentially
7264 * still get these exits. See Intel spec. "23.8 Restrictions on VMX operation".
7265 */
7266 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7267 return VINF_SUCCESS; /** @todo r=ramshankar: correct? */
7268}
7269
7270
7271/**
7272 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7273 * VM-exit.
7274 */
7275static DECLCALLBACK(int) hmR0VmxExitTripleFault(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7276{
7277 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7278 return VINF_EM_RESET;
7279}
7280
7281
7282/**
7283 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7284 */
7285static DECLCALLBACK(int) hmR0VmxExitHlt(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7286{
7287 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7288 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT);
7289 int rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7290 rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
7291 AssertRCReturn(rc, rc);
7292
7293 pMixedCtx->rip++;
7294 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7295 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
7296 rc = VINF_SUCCESS;
7297 else
7298 rc = VINF_EM_HALT;
7299
7300 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
7301 return rc;
7302}
7303
7304
7305/**
7306 * VM-exit handler for instructions that result in a #UD exception delivered to the guest.
7307 */
7308static DECLCALLBACK(int) hmR0VmxExitInjectXcptUD(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7309{
7310 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7311 return hmR0VmxInjectXcptUD(pVM, pVCpu, pMixedCtx);
7312}
7313
7314
7315/**
7316 * VM-exit handler for expiry of the VMX preemption timer.
7317 */
7318static DECLCALLBACK(int) hmR0VmxExitPreemptionTimer(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7319{
7320 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7321
7322 /* If we're saving the preemption-timer value on every VM-exit and we've reached zero, set it up again on the next VM-entry. */
7323 if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
7324 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7325
7326 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7327 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7328 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
7329 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7330}
7331
7332
7333/**
7334 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7335 */
7336static DECLCALLBACK(int) hmR0VmxExitXsetbv(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7337{
7338 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7339 /* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
7340 /** @todo check if XSETBV is supported by the recompiler. */
7341 return VERR_EM_INTERPRETER;
7342}
7343
7344
7345/**
7346 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7347 */
7348static DECLCALLBACK(int) hmR0VmxExitInvpcid(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7349{
7350 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7351 /* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
7352 /** @todo implement EMInterpretInvpcid() */
7353 return VERR_EM_INTERPRETER;
7354}
7355
7356
7357/**
7358 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
7359 * Error VM-exit.
7360 */
7361static DECLCALLBACK(int) hmR0VmxExitErrInvalidGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7362{
7363 uint32_t uIntrState;
7364 RTHCUINTREG uHCReg;
7365 uint64_t u64Val;
7366
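    /* Read the VM-entry fields, the interruptibility state and the full guest state so the log statements below have enough context for diagnosing the invalid-guest-state failure. */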
7367 int rc = hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
7368 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
7369 rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7370 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
7371 rc |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
7372 AssertRCReturn(rc, rc);
7373
7374 Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
7375 Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7376 Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7377 Log(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
7378
7379 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7380 Log(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7381 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
7382 Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
7383 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
7384 Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7385 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
7386 Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
7387 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
7388 Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7389 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7390 Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7391
7392 HMDumpRegs(pVM, pVCpu, pMixedCtx);
7393
7394 return VERR_VMX_INVALID_GUEST_STATE;
7395}
7396
7397
7398/**
7399 * VM-exit handler for VM-entry failure due to an MSR-load
7400 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
7401 */
7402static DECLCALLBACK(int) hmR0VmxExitErrMsrLoad(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7403{
7404 AssertMsgFailed(("Unexpected MSR-load exit. pVM=%p pVCpu=%p pMixedCtx=%p\n", pVM, pVCpu, pMixedCtx));
7405 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7406}
7407
7408
7409/**
7410 * VM-exit handler for VM-entry failure due to a machine-check event
7411 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
7412 */
7413static DECLCALLBACK(int) hmR0VmxExitErrMachineCheck(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7414{
7415 AssertMsgFailed(("Unexpected machine-check event exit. pVM=%p pVCpu=%p pMixedCtx=%p\n", pVM, pVCpu, pMixedCtx));
7416 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7417}
7418
7419
7420/**
7421 * VM-exit handler for all undefined reasons. Should never ever happen... in
7422 * theory.
7423 */
7424static DECLCALLBACK(int) hmR0VmxExitErrUndefined(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7425{
7426 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVM=%p pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason,
7427 pVM, pVCpu, pMixedCtx));
7428 return VERR_VMX_UNDEFINED_EXIT_CODE;
7429}
7430
7431
7432/**
7433 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
7434 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
7435 * Conditional VM-exit.
7436 */
7437static DECLCALLBACK(int) hmR0VmxExitXdtrAccess(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7438{
7439 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7440 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
7441 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
7442 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
7443 return VERR_EM_INTERPRETER;
7444 AssertMsgFailed(("Unexpected XDTR access. pVM=%p pVCpu=%p pMixedCtx=%p\n", pVM, pVCpu, pMixedCtx));
7445 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7446}
7447
7448
7449/**
7450 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
7451 */
7452static DECLCALLBACK(int) hmR0VmxExitRdrand(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7453{
7454 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7455 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
7456 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
7457 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
7458 return VERR_EM_INTERPRETER;
7459 AssertMsgFailed(("Unexpected RDRAND exit. pVM=%p pVCpu=%p pMixedCtx=%p\n", pVM, pVCpu, pMixedCtx));
7460 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7461}
7462
7463
7464/**
7465 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7466 */
7467static DECLCALLBACK(int) hmR0VmxExitRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7468{
7469 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7470 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
7471 int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
7472 rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
7473 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
7474 AssertRCReturn(rc, rc);
7475
7476 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7477 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
7478 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
7479 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
7480
7481 /* Update RIP and continue guest execution. */
7482 if (RT_LIKELY(rc == VINF_SUCCESS))
7483 {
7484 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7485 rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7486 AssertRCReturn(rc, rc);
7487
7488 Assert(pVmxTransient->cbInstr == 2);
7489 pMixedCtx->rip += pVmxTransient->cbInstr;
7490 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7491 }
7492 return rc;
7493}
7494
7495
7496/**
7497 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7498 */
7499static DECLCALLBACK(int) hmR0VmxExitWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7500{
7501 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7502 int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7503 AssertRCReturn(rc, rc);
7504 Assert(pVmxTransient->cbInstr == 2);
7505
7506 /* If TPR patching is active, LSTAR holds the guest TPR, writes to it must be propagated to the APIC. */
7507 if ( pVM->hm.s.fTPRPatchingActive
7508 && pMixedCtx->ecx == MSR_K8_LSTAR)
7509 {
7510 Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* Requires EFER but it's always up-to-date. */
7511 if ((pMixedCtx->eax & 0xff) != pVmxTransient->u8GuestTpr)
7512 {
7513 rc = PDMApicSetTPR(pVCpu, pMixedCtx->eax & 0xff);
7514 AssertRC(rc);
7515 }
7516
7517 rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7518 AssertRCReturn(rc, rc);
7519 pMixedCtx->rip += pVmxTransient->cbInstr;
7520 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7521 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7522 return VINF_SUCCESS;
7523 }
7524
7525 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
7526 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS))
7527 {
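        /* These MSRs live in the VMCS guest-state area; mark them dirty so their VMCS copies are rewritten from pMixedCtx before the next VM-entry. */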
7528 switch (pMixedCtx->ecx)
7529 {
7530 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;
7531 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
7532 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
7533 case MSR_K8_FS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_FS_BASE_MSR; break;
7534 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_GS_BASE_MSR; break;
7535 }
7536 }
7537#ifdef DEBUG
7538 else
7539 {
7540 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7541 switch (pMixedCtx->ecx)
7542 {
7543 case MSR_IA32_SYSENTER_CS:
7544 case MSR_IA32_SYSENTER_EIP:
7545 case MSR_IA32_SYSENTER_ESP:
7546 case MSR_K8_FS_BASE:
7547 case MSR_K8_GS_BASE:
7548 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%RX32\n", pMixedCtx->ecx));
7549 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7550 case MSR_K8_LSTAR:
7551 case MSR_K6_STAR:
7552 case MSR_K8_SF_MASK:
7553 case MSR_K8_TSC_AUX:
7554 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%RX32\n", pMixedCtx->ecx));
7555 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7556 }
7557 }
7558#endif
7559
7560 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
7561 rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
7562 rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
7563 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
7564 AssertRCReturn(rc, rc);
7565
7566 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7567 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
7568 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7569
7570 /* Update guest-state and continue execution. */
7571 if (RT_LIKELY(rc == VINF_SUCCESS))
7572 {
7573 rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7574 AssertRCReturn(rc, rc);
7575
7576 pMixedCtx->rip += pVmxTransient->cbInstr;
7577 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7578
7579 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7580 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
7581 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
7582 {
7583 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7584 }
7585 }
7586 return rc;
7587}
7588
7589
7590/**
7591 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7592 */
7593static DECLCALLBACK(int) hmR0VmxExitPause(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7594{
7595 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7596 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT. */
7597 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
7598 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
7599 return VERR_EM_INTERPRETER;
7600 AssertMsgFailed(("Unexpected PAUSE exit. pVM=%p pVCpu=%p pMixedCtx=%p\n", pVM, pVCpu, pMixedCtx));
7601 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7602}
7603
7604
7605/**
7606 * VM-exit handler for when the TPR value is lowered below the specified
7607 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7608 */
7609static DECLCALLBACK(int) hmR0VmxExitTprBelowThreshold(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7610{
7611 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7612 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW);
7613
7614 /*
7615 * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
7616 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectPendingInterrupt() and
7617 * resume guest execution.
7618 */
7619 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7620 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
7621 return VINF_SUCCESS;
7622}
7623
7624
7625/**
7626 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
7627 * VM-exit.
7628 *
7629 * @retval VINF_SUCCESS when guest execution can continue.
7630 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
7631 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
7632 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
7633 * recompiler.
7634 */
7635static DECLCALLBACK(int) hmR0VmxExitMovCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7636{
7637 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7638 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7639 AssertRCReturn(rc, rc);
7640
7641 const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
7642 const uint32_t uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
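    /* The exit qualification encodes the access type (MOV to/from CRx, CLTS, LMSW), the control register involved and the general-purpose register used. */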
7643 switch (uAccessType)
7644 {
7645 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
7646 {
7647#if 0
7648 /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
7649 rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
7650#else
7651 rc = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
7652 rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
7653 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
7654#endif
7655 AssertRCReturn(rc, rc);
7656
7657 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
7658 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
7659 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
7660 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
7661
7662 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
7663 {
7664 case 0: /* CR0 */
7665 Log(("CR0 write rc=%d\n", rc));
7666 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7667 break;
7668 case 2: /* CR2 */
7669 Log(("CR2 write rc=%d\n", rc));
7670 break;
7671 case 3: /* CR3 */
7672 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
7673 Log(("CR3 write rc=%d\n", rc));
7674 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
7675 break;
7676 case 4: /* CR4 */
7677 Log(("CR4 write rc=%d\n", rc));
7678 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
7679 break;
7680 case 8: /* CR8 */
7681 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
7682 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
7683 /* We don't need to update HM_CHANGED_VMX_GUEST_APIC_STATE here as this -cannot- happen with TPR shadowing. */
7684 break;
7685 default:
7686 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
7687 break;
7688 }
7689
7690 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
7691 break;
7692 }
7693
7694 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
7695 {
7696 /* EMInterpretCRxRead() requires EFER MSR, CS. */
7697 rc = hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
7698 AssertRCReturn(rc, rc);
7699 Assert( !pVM->hm.s.fNestedPaging
7700 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
7701 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
7702
7703 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
7704 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
7705 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
7706
7707 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
7708 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
7709 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
7710 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
7711 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
7712 Log(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
7713 break;
7714 }
7715
7716 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
7717 {
7718 rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
7719 AssertRCReturn(rc, rc);
7720 rc = EMInterpretCLTS(pVM, pVCpu);
7721 AssertRCReturn(rc, rc);
7722 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7723 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
7724 Log(("CRX CLTS write rc=%d\n", rc));
7725 break;
7726 }
7727
7728 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
7729 {
7730 rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
7731 AssertRCReturn(rc, rc);
7732 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
7733 if (RT_LIKELY(rc == VINF_SUCCESS))
7734 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7735 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
7736 Log(("CRX LMSW write rc=%d\n", rc));
7737 break;
7738 }
7739
7740 default:
7741 {
7742 AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
7743 return VERR_VMX_UNEXPECTED_EXCEPTION;
7744 }
7745 }
7746
7747 /* Validate possible error codes. */
7748 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3);
7749 if (RT_SUCCESS(rc))
7750 {
7751 int rc2 = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7752 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7753 AssertRCReturn(rc2, rc2);
7754 pMixedCtx->rip += pVmxTransient->cbInstr;
7755 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7756 }
7757
7758 return rc;
7759}
7760
7761
7762/**
7763 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
7764 * VM-exit.
7765 */
7766static DECLCALLBACK(int) hmR0VmxExitIoInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7767{
7768 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7769
7770 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7771 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7772 rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
7773 rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
7774 rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
7775 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
7776 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
7777 AssertRCReturn(rc, rc);
7778
7779 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
7780 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
7781 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
7782 bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
7783 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
7784 bool fIOString = (VMX_EXIT_QUALIFICATION_IO_STRING(pVmxTransient->uExitQualification) == 1);
7785 Assert(uIOWidth == 0 || uIOWidth == 1 || uIOWidth == 3);
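    /* The width field of the exit qualification uses the encoding 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; 2 is undefined, hence the zero entries in the tables below. */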
7786
7787 /* I/O operation lookup arrays. */
7788 static const uint32_t s_aIOSize[4] = { 1, 2, 0, 4 }; /* Size of the I/O Accesses. */
7789 static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
7790
7791 const uint32_t cbSize = s_aIOSize[uIOWidth];
7792 const uint32_t cbInstr = pVmxTransient->cbInstr;
7793 if (fIOString)
7794 {
7795 /* INS/OUTS - I/O String instruction. */
7796 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
7797 /** @todo For now we manually disassemble; later, optimize this by getting the fields
7798 * from the VMCS. */
7799 /** @todo VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR contains the flat pointer
7800 * operand of the instruction. VMX_VMCS32_RO_EXIT_INSTR_INFO contains
7801 * segment prefix info. */
7802 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
7803 if (RT_SUCCESS(rc))
7804 {
7805 if (fIOWrite)
7806 {
7807 VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
7808 (DISCPUMODE)pDis->uAddrMode, cbSize);
7809 rc = VBOXSTRICTRC_VAL(rc2);
7810 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
7811 }
7812 else
7813 {
7814 VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
7815 (DISCPUMODE)pDis->uAddrMode, cbSize);
7816 rc = VBOXSTRICTRC_VAL(rc2);
7817 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
7818 }
7819 }
7820 else
7821 {
7822 AssertMsg(rc == VERR_EM_INTERPRETER, ("rc=%Rrc RIP %#RX64\n", rc, pMixedCtx->rip));
7823 rc = VINF_EM_RAW_EMULATE_INSTR;
7824 }
7825 }
7826 else
7827 {
7828 /* IN/OUT - I/O instruction. */
7829 const uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
7830 Assert(!VMX_EXIT_QUALIFICATION_IO_REP(pVmxTransient->uExitQualification));
7831 if (fIOWrite)
7832 {
7833 VBOXSTRICTRC rc2 = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbSize);
7834 rc = VBOXSTRICTRC_VAL(rc2);
7835 if (rc == VINF_IOM_R3_IOPORT_WRITE)
7836 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
7837 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
7838 }
7839 else
7840 {
7841 uint32_t u32Result = 0;
7842 VBOXSTRICTRC rc2 = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbSize);
7843 rc = VBOXSTRICTRC_VAL(rc2);
7844 if (IOM_SUCCESS(rc))
7845 {
7846 /* Save result of I/O IN instr. in AL/AX/EAX. */
7847 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
7848 }
7849 else if (rc == VINF_IOM_R3_IOPORT_READ)
7850 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
7851 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
7852 }
7853 }
7854
7855 if (IOM_SUCCESS(rc))
7856 {
7857 pMixedCtx->rip += cbInstr;
7858 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7859 if (RT_LIKELY(rc == VINF_SUCCESS))
7860 {
7861 rc = hmR0VmxSaveGuestDebugRegs(pVM, pVCpu, pMixedCtx); /* For DR7. */
7862 AssertRCReturn(rc, rc);
7863
7864 /* If any IO breakpoints are armed, then we should check if a debug trap needs to be generated. */
7865 if (pMixedCtx->dr[7] & X86_DR7_ENABLED_MASK)
7866 {
7867 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
7868 for (unsigned i = 0; i < 4; i++)
7869 {
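                    /* A breakpoint matches if the I/O port lies within its range, it is enabled locally or globally in DR7, and its R/W bits select I/O breakpoints. */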
7870 unsigned uBPLen = s_aIOSize[X86_DR7_GET_LEN(pMixedCtx->dr[7], i)];
7871 if ( ( uIOPort >= pMixedCtx->dr[i]
7872 && uIOPort < pMixedCtx->dr[i] + uBPLen)
7873 && (pMixedCtx->dr[7] & (X86_DR7_L(i) | X86_DR7_G(i)))
7874 && (pMixedCtx->dr[7] & X86_DR7_RW(i, X86_DR7_RW_IO)) == X86_DR7_RW(i, X86_DR7_RW_IO))
7875 {
7876 Assert(CPUMIsGuestDebugStateActive(pVCpu));
7877 uint64_t uDR6 = ASMGetDR6();
7878
7879 /* Clear all breakpoint status flags and set the one we just hit. */
7880 uDR6 &= ~(X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3);
7881 uDR6 |= (uint64_t)RT_BIT(i);
7882
7883 /*
7884 * Note: AMD64 Architecture Programmer's Manual 13.1:
7885 * Bits 15:13 of the DR6 register is never cleared by the processor and must
7886 * be cleared by software after the contents have been read.
7887 */
7888 ASMSetDR6(uDR6);
7889
7890 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7891 pMixedCtx->dr[7] &= ~X86_DR7_GD;
7892
7893 /* Paranoia. */
7894 pMixedCtx->dr[7] &= 0xffffffff; /* Upper 32 bits reserved. */
7895 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
7896 pMixedCtx->dr[7] |= 0x400; /* MB1. */
7897
7898 /* Resync DR7 */
7899 /** @todo probably cheaper to just reload DR7, nothing else needs changing. */
7900 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
7901
7902 /* Inject #DB and get on with guest execution. */
7903 rc = hmR0VmxInjectXcptDB(pVM, pVCpu, pMixedCtx);
7904 AssertRCReturn(rc, rc);
7905 break;
7906 }
7907 }
7908 }
7909 }
7910 }
7911
7912#ifdef DEBUG
7913 if (rc == VINF_IOM_R3_IOPORT_READ)
7914 Assert(!fIOWrite);
7915 else if (rc == VINF_IOM_R3_IOPORT_WRITE)
7916 Assert(fIOWrite);
7917 else
7918 {
7919 AssertMsg( RT_FAILURE(rc)
7920 || rc == VINF_SUCCESS
7921 || rc == VINF_EM_RAW_EMULATE_INSTR
7922 || rc == VINF_EM_RAW_GUEST_TRAP
7923 || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", rc));
7924 }
7925#endif
7926
7927 return rc;
7928}
7929
7930
7931/**
7932 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
7933 * VM-exit.
7934 */
7935static DECLCALLBACK(int) hmR0VmxExitTaskSwitch(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7936{
7937 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7938
7939 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
7940 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7941 AssertRCReturn(rc, rc);
7942 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
7943 {
7944 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
7945 AssertRCReturn(rc, rc);
7946 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
7947 {
7948 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
7949 if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
7950 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
7951 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
7952 {
7953 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
7954 pVCpu->hm.s.Event.fPending = true;
7955 pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
7956 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
7957 AssertRCReturn(rc, rc);
7958 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringErrorCode))
7959 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
7960 else
7961 pVCpu->hm.s.Event.u32ErrCode = 0;
7962 }
7963 }
7964 }
7965 /** @todo Emulate task switch someday, currently just going back to ring-3 for
7966 * emulation. */
7967 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
7968 return VERR_EM_INTERPRETER;
7969}
7970
7971
7972/**
7973 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
7974 */
7975static DECLCALLBACK(int) hmR0VmxExitMtf(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7976{
7977 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7978 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG);
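    /* The monitor trap flag fires after a single guest instruction; clear it again and return to ring-3 so the debugger gets control. */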
7979 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
7980 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
7981 AssertRCReturn(rc, rc);
7982 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
7983 return VINF_EM_DBG_STOP;
7984}
7985
7986
7987/**
7988 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
7989 */
7990static DECLCALLBACK(int) hmR0VmxExitApicAccess(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7991{
7992 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7993 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); AssertRCReturn(rc, rc);
7994
7995 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
7996 rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
7997 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
7998 return VINF_SUCCESS;
7999 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8000 return rc;
8001
8002#if 0
8003 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
8004 * just sync the whole thing. */
8005 rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
8006#else
8007 /* Aggressive state sync. for now. */
8008 rc = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
8009 rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
8010 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
8011#endif
8012 AssertRCReturn(rc, rc);
8013
8014 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
8015 unsigned uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
8016 switch (uAccessType)
8017 {
8018 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8019 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8020 {
8021 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
8022 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
8023 {
8024 AssertMsgFailed(("hmR0VmxExitApicAccess: can't touch TPR offset while using TPR shadowing.\n"));
8025 }
8026
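            /* Reconstruct the guest-physical address that was accessed from the APIC-base MSR (page-aligned) plus the page offset reported in the exit qualification. */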
8027 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
8028 GCPhys &= PAGE_BASE_GC_MASK;
8029 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
8030 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu, (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
8031 CPUMCTX2CORE(pMixedCtx), GCPhys);
8032 rc = VBOXSTRICTRC_VAL(rc2);
8033 Log(("ApicAccess %RGp %#x\n", GCPhys, VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
8034 if ( rc == VINF_SUCCESS
8035 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8036 || rc == VERR_PAGE_NOT_PRESENT)
8037 {
8038 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8039 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8040 rc = VINF_SUCCESS;
8041 }
8042 break;
8043 }
8044
8045 default:
8046 rc = VINF_EM_RAW_EMULATE_INSTR;
8047 break;
8048 }
8049
8050 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
8051 return rc;
8052}
8053
8054
8055/**
8056 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8057 * VM-exit.
8058 */
8059static DECLCALLBACK(int) hmR0VmxExitMovDRx(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8060{
8061 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8062
8063 /* We should -not- get this VM-exit if the guest is debugging. */
8064 if (CPUMIsGuestDebugStateActive(pVCpu))
8065 {
8066 AssertMsgFailed(("Unexpected MOV DRx exit. pVM=%p pVCpu=%p pMixedCtx=%p\n", pVM, pVCpu, pMixedCtx));
8067 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8068 }
8069
8070 int rc = VERR_INTERNAL_ERROR_5;
8071 if ( !DBGFIsStepping(pVCpu)
8072 && !CPUMIsHyperDebugStateActive(pVCpu))
8073 {
8074 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
8075
8076 /* Don't intercept MOV DRx. */
8077 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
8078 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
8079 AssertRCReturn(rc, rc);
8080
8081 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8082 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
8083 AssertRC(rc);
8084 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8085
8086#ifdef VBOX_WITH_STATISTICS
8087 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8088 AssertRCReturn(rc, rc);
8089 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8090 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8091 else
8092 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8093#endif
8094 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
8095 return VINF_SUCCESS;
8096 }
8097
8098 /** @todo clear VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT after the first
8099 * time and restore DRx registers afterwards */
8100 /*
8101 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
8102 * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update only the segment registers from the CPU.
8103 */
8104 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8105 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
8106 AssertRCReturn(rc, rc);
8107
8108 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8109 {
8110 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8111 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
8112 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
8113 if (RT_SUCCESS(rc))
8114 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
8115 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8116 }
8117 else
8118 {
8119 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8120 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
8121 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
8122 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8123 }
8124
8125 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8126 if (RT_SUCCESS(rc))
8127 {
8128 int rc2 = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
8129 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8130 AssertRCReturn(rc2, rc2);
8131 pMixedCtx->rip += pVmxTransient->cbInstr;
8132 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8133 }
8134 return rc;
8135}
8136
8137
8138/**
8139 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8140 * Conditional VM-exit.
8141 */
8142static DECLCALLBACK(int) hmR0VmxExitEptMisconfig(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8143{
8144 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8145 Assert(pVM->hm.s.fNestedPaging);
8146
8147 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8148 int rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
8149 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8150 return VINF_SUCCESS;
8151 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8152 return rc;
8153
8154 RTGCPHYS GCPhys = 0;
8155 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8156
8157#if 0
8158 rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx); /** @todo Can we do better? */
8159#else
8160 /* Aggressive state sync. for now. */
8161 rc |= hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
8162 rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
8163 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
8164#endif
8165 AssertRCReturn(rc, rc);
8166
8167 /*
8168 * If we succeed, resume guest execution.
8169 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8170 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8171 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8172 * weird case. See @bugref{6043}.
8173 */
8174 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
8175 Log(("EPT misconfig at %#RX64 RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
8176 rc = VBOXSTRICTRC_VAL(rc2);
8177 if ( rc == VINF_SUCCESS
8178 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8179 || rc == VERR_PAGE_NOT_PRESENT)
8180 {
8181 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8182 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8183 return VINF_SUCCESS;
8184 }
8185 return rc;
8186}
8187
8188
8189/**
8190 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8191 * VM-exit.
8192 */
8193static DECLCALLBACK(int) hmR0VmxExitEptViolation(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8194{
8195 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8196 Assert(pVM->hm.s.fNestedPaging);
8197
8198 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8199 int rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
8200 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8201 return VINF_SUCCESS;
8202 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8203 return rc;
8204
8205 RTGCPHYS GCPhys = 0;
8206 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8207 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8208#if 0
8209 rc |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx); /** @todo Can we do better? */
8210#else
8211 /* Aggressive state sync. for now. */
8212 rc |= hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
8213 rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
8214 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
8215#endif
8216 AssertRCReturn(rc, rc);
8217
8218 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
8219 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RGv", pVmxTransient->uExitQualification));
8220
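    /* Translate the EPT-violation qualification bits into an x86 page-fault error code for the nested-paging handler below. */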
8221 RTGCUINT uErrorCode = 0;
8222 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
8223 uErrorCode |= X86_TRAP_PF_ID;
8224 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
8225 uErrorCode |= X86_TRAP_PF_RW;
8226 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
8227 uErrorCode |= X86_TRAP_PF_P;
8228
8229 TRPMAssertTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP);
8230 TRPMSetErrorCode(pVCpu, uErrorCode);
8231 TRPMSetFaultAddress(pVCpu, GCPhys);
8232
8233 Log(("EPT violation %#x at %#RGv ErrorCode %#x CS:EIP=%04x:%#RX64\n", (uint32_t)pVmxTransient->uExitQualification, GCPhys,
8234 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8235
8236 /* Handle the pagefault trap for the nested shadow table. */
8237 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
8238 TRPMResetTrap(pVCpu);
8239
8240 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8241 if ( rc == VINF_SUCCESS
8242 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8243 || rc == VERR_PAGE_NOT_PRESENT)
8244 {
8245 /* Successfully synced our shadow page tables or emulation MMIO instruction. */
8246 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
8247 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8248 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8249 return VINF_SUCCESS;
8250 }
8251
8252 Log(("EPT return to ring-3 rc=%d\n"));
8253 return rc;
8254}
8255
8256
8257/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8258/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
8259/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8260/**
8261 * VM-exit exception handler for #MF (Math Fault: floating point exception).
8262 */
8263static DECLCALLBACK(int) hmR0VmxExitXcptMF(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8264{
8265 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8266
8267 int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
8268 AssertRCReturn(rc, rc);
8269 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
8270
8271 if (!(pMixedCtx->cr0 & X86_CR0_NE))
8272 {
8273 /* Old-style FPU error reporting needs some extra work. */
8274 /** @todo don't fall back to the recompiler, but do it manually. */
8275 return VERR_EM_INTERPRETER;
8276 }
8277 rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
8278 VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8279 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode);
8280 AssertRCReturn(rc, rc);
8281 return rc;
8282}
8283
8284
8285/**
8286 * VM-exit exception handler for #BP (Breakpoint exception).
8287 */
8288static DECLCALLBACK(int) hmR0VmxExitXcptBP(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8289{
8290 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8291
8292 /** @todo Try optimize this by not saving the entire guest state unless
8293 * really needed. */
8294 int rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
8295 AssertRCReturn(rc, rc);
8296 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
8297
8298 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8299 if (rc == VINF_EM_RAW_GUEST_TRAP)
8300 {
8301 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8302 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8303 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8304 AssertRCReturn(rc, rc);
8305
8306 rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
8307 VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8308 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode);
8309 AssertRCReturn(rc, rc);
8310 }
8311
8312 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
8313 return rc;
8314}
8315
8316
8317/**
8318 * VM-exit exception handler for #DB (Debug exception).
8319 */
8320static DECLCALLBACK(int) hmR0VmxExitXcptDB(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8321{
8322 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8323
8324 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8325 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
8326 rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
8327 AssertRCReturn(rc, rc);
8328
8329 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
8330 uint64_t uDR6 = X86_DR6_INIT_VAL;
8331 uDR6 |= (pVmxTransient->uExitQualification
8332 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
8333 rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
8334 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
8335 if (rc == VINF_EM_RAW_GUEST_TRAP)
8336 {
8337 /* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet. See Intel spec. 27.1 "Architectural State before a VM-Exit". */
8338 pMixedCtx->dr[6] = uDR6;
8339
8340 if (CPUMIsGuestDebugStateActive(pVCpu))
8341 ASMSetDR6(pMixedCtx->dr[6]);
8342
8343 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
8344 pMixedCtx->dr[7] &= ~X86_DR7_GD;
8345
8346 /* Paranoia. */
8347 pMixedCtx->dr[7] &= 0xffffffff; /* upper 32 bits reserved */
8348 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
8349 pMixedCtx->dr[7] |= 0x400; /* must be one */
8350
8351 /* Resync DR7. */
8352 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
8353
8354 rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8355 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8356 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8357 rc |= hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
8358 VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8359 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode);
8360 AssertRCReturn(rc,rc);
8361 return rc;
8362 }
8363 /* Return to ring 3 to deal with the debug exit code. */
8364 return rc;
8365}
8366
8367
8368/**
8369 * VM-exit exception handler for #NM (Device-not-available exception: floating
8370 * point exception).
8371 */
8372static DECLCALLBACK(int) hmR0VmxExitXcptNM(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8373{
8374 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8375
8376#ifndef VBOX_ALWAYS_TRAP_ALL_EXCEPTIONS
8377 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
8378#endif
8379
8380 /* We require CR0 and EFER. EFER is always up-to-date. */
8381 int rc = hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
8382 AssertRCReturn(rc, rc);
8383
8384 /* Lazy FPU loading; Load the guest-FPU state transparently and continue execution of the guest. */
8385 rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8386 if (rc == VINF_SUCCESS)
8387 {
8388 Assert(CPUMIsGuestFPUStateActive(pVCpu));
8389 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8390 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
8391 return VINF_SUCCESS;
8392 }
8393
8394 /* Forward #NM to the guest. */
8395 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
8396 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8397 AssertRCReturn(rc, rc);
8398 rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
8399 VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8400 pVmxTransient->cbInstr, 0 /* error code */);
8401 AssertRCReturn(rc, rc);
8402 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
8403 return rc;
8404}
8405
8406
8407/**
8408 * VM-exit exception handler for #GP (General-protection exception).
8409 *
8410 * @remarks Requires pVmxTransient->uExitIntrInfo to be up-to-date.
8411 */
8412static DECLCALLBACK(int) hmR0VmxExitXcptGP(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8413{
8414 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8415
8416 int rc = VERR_INTERNAL_ERROR_5;
8417
8418 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
8419 {
8420#ifdef VBOX_ALWAYS_TRAP_ALL_EXCEPTIONS
8421 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
8422 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8423 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8424 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8425 rc |= hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
8426 VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8427 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode);
8428 AssertRCReturn(rc, rc);
8429 return rc;
8430#else
8431 /* We don't intercept #GP. */
8432 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
8433 return VERR_VMX_UNEXPECTED_EXCEPTION;
8434#endif
8435 }
8436
8437 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
8438 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
8439
8440 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
8441 rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
8442 AssertRCReturn(rc, rc);
8443
8444 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
8445 unsigned int cbOp = 0;
8446 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
8447 if (RT_SUCCESS(rc))
8448 {
8449 rc = VINF_SUCCESS;
8450 Assert(cbOp == pDis->cbInstr);
8451 Log2(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8452 switch (pDis->pCurInstr->uOpcode)
8453 {
8454 case OP_CLI:
8455 pMixedCtx->eflags.Bits.u1IF = 0;
8456 pMixedCtx->rip += pDis->cbInstr;
8457 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8458 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
8459 break;
8460
8461 case OP_STI:
8462 pMixedCtx->eflags.Bits.u1IF = 1;
8463 pMixedCtx->rip += pDis->cbInstr;
8464 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
8465 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
8466 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8467 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
8468 break;
8469
8470 case OP_HLT:
8471 rc = VINF_EM_HALT;
8472 pMixedCtx->rip += pDis->cbInstr;
8473 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8474 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
8475 break;
8476
8477 case OP_POPF:
8478 {
8479 Log(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
8480 uint32_t cbParm = 0;
8481 uint32_t uMask = 0;
8482 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8483 {
8484 cbParm = 4;
8485 uMask = 0xffffffff;
8486 }
8487 else
8488 {
8489 cbParm = 2;
8490 uMask = 0xffff;
8491 }
8492
8493 /* Get the stack pointer & pop the contents of the stack onto EFlags. */
8494 RTGCPTR GCPtrStack = 0;
8495 X86EFLAGS uEflags;
8496 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
8497 &GCPtrStack);
8498 if (RT_SUCCESS(rc))
8499 {
8500 Assert(sizeof(uEflags.u32) >= cbParm);
8501 uEflags.u32 = 0;
8502 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u32, cbParm);
8503 }
8504 if (RT_FAILURE(rc))
8505 {
8506 rc = VERR_EM_INTERPRETER;
8507 break;
8508 }
8509 Log(("POPF %x -> %RGv mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
8510 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
8511 | (uEflags.u32 & X86_EFL_POPF_BITS & uMask);
8512 /* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */
8513 pMixedCtx->eflags.Bits.u1RF = 0;
8514 pMixedCtx->esp += cbParm;
8515 pMixedCtx->esp &= uMask;
8516 pMixedCtx->rip += pDis->cbInstr;
8517 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
8518 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
8519 break;
8520 }
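
/* Note on the POPF emulation above: only the EFLAGS bits covered by X86_EFL_POPF_BITS (further restricted
   by the 16/32-bit operand mask) are taken from the value popped off the stack; all other bits keep their
   current values, and RF is cleared just as the real instruction does. */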
8521
8522 case OP_PUSHF:
8523 {
8524 uint32_t cbParm = 0;
8525 uint32_t uMask = 0;
8526 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8527 {
8528 cbParm = 4;
8529 uMask = 0xffffffff;
8530 }
8531 else
8532 {
8533 cbParm = 2;
8534 uMask = 0xffff;
8535 }
8536
8537 /* Get the stack pointer & push the contents of eflags onto the stack. */
8538 RTGCPTR GCPtrStack = 0;
8539 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
8540 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
8541 if (RT_FAILURE(rc))
8542 {
8543 rc = VERR_EM_INTERPRETER;
8544 break;
8545 }
8546 X86EFLAGS uEflags;
8547 uEflags = pMixedCtx->eflags;
8548 /* The RF & VM bits are cleared on image stored on stack; see Intel Instruction reference for PUSHF. */
8549 uEflags.Bits.u1RF = 0;
8550 uEflags.Bits.u1VM = 0;
8551
8552 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u, cbParm);
8553 if (RT_FAILURE(rc))
8554 {
8555 rc = VERR_EM_INTERPRETER;
8556 break;
8557 }
8558 Log(("PUSHF %x -> %RGv\n", uEflags.u, GCPtrStack));
8559 pMixedCtx->esp -= cbParm;
8560 pMixedCtx->esp &= uMask;
8561 pMixedCtx->rip += pDis->cbInstr;
8562 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
8563 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
8564 break;
8565 }
8566
8567 case OP_IRET:
8568 {
8569 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
8570 * instruction reference. */
8571 RTGCPTR GCPtrStack = 0;
8572 uint32_t uMask = 0xffff;
8573 uint16_t aIretFrame[3];
8574 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
8575 {
8576 rc = VERR_EM_INTERPRETER;
8577 break;
8578 }
8579 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
8580 &GCPtrStack);
8581 if (RT_SUCCESS(rc))
8582 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
8583 if (RT_FAILURE(rc))
8584 {
8585 rc = VERR_EM_INTERPRETER;
8586 break;
8587 }
8588 pMixedCtx->eip = 0;
8589 pMixedCtx->ip = aIretFrame[0];
8590 pMixedCtx->cs.Sel = aIretFrame[1];
8591 pMixedCtx->cs.ValidSel = aIretFrame[1];
8592 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
8593 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
8594 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
8595 pMixedCtx->sp += sizeof(aIretFrame);
8596 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
8597 | HM_CHANGED_GUEST_RFLAGS;
8598 Log(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
8599 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
8600 break;
8601 }
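
/* Note on the IRET emulation above: in 16-bit real mode the frame popped off the stack is three words --
   aIretFrame[0] = IP, aIretFrame[1] = CS and aIretFrame[2] = FLAGS -- and the new CS base is simply CS << 4.
   Operand- or address-size overrides are not handled here and fall back to the interpreter (VERR_EM_INTERPRETER). */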
8602
8603 case OP_INT:
8604 {
8605 uint16_t uVector = pDis->Param1.uValue & 0xff;
8606 rc = hmR0VmxInjectIntN(pVM, pVCpu, pMixedCtx, uVector, pDis->cbInstr);
8607 AssertRCReturn(rc, rc);
8608 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
8609 break;
8610 }
8611
8612 case OP_INTO:
8613 {
8614 if (pMixedCtx->eflags.Bits.u1OF)
8615 {
8616 rc = hmR0VmxInjectXcptOF(pVM, pVCpu, pMixedCtx, pDis->cbInstr);
8617 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
8618 }
8619 break;
8620 }
8621
8622 default:
8623 {
8624 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
8625 EMCODETYPE_SUPERVISOR);
8626 rc = VBOXSTRICTRC_VAL(rc2);
8627 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
8628 Log2(("#GP rc=%Rrc\n", rc));
8629 break;
8630 }
8631 }
8632 }
8633 else
8634 rc = VERR_EM_INTERPRETER;
8635
8636 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT
8637 || rc == VINF_EM_RESET /* injection caused triple fault */,
8638 ("#GP Unexpected rc=%Rrc\n", rc));
8639 return rc;
8640}
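
/*
 * Note on hmR0VmxExitXcptGP() above: without unrestricted guest execution, a guest in real mode is run in
 * virtual-8086 mode under VT-x, so privileged real-mode instructions (CLI, STI, HLT, POPF, PUSHF, IRET, INT,
 * INTO) raise #GP and are emulated here; anything else is handed to EMInterpretInstructionDisasState().
 */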
8641
8642
8643/**
8644 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
8645 * the exception reported in the VMX transient structure back into the VM.
8646 *
8647 * @remarks Requires uExitIntrInfo, uExitIntrErrorCode, cbInstr fields in the
8648 * VMX transient structure to be up-to-date.
8649 */
8650static DECLCALLBACK(int) hmR0VmxExitXcptGeneric(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8651{
8652 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8653
8654 /* Re-inject the exception into the guest. This cannot be a double-fault condition, as that case would already
8655 have been handled in hmR0VmxCheckExitDueToEventDelivery(). */
8656 int rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
8657 VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8658 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode);
8659 AssertRCReturn(rc, rc);
8660 return rc;
8661}
8662
8663
8664/**
8665 * VM-exit exception handler for #PF (Page-fault exception).
8666 */
8667static DECLCALLBACK(int) hmR0VmxExitXcptPF(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8668{
8669 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8670
8671 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8672 rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8673 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8674 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8675 AssertRCReturn(rc, rc);
8676
8677#if defined(VBOX_ALWAYS_TRAP_ALL_EXCEPTIONS) || defined(VBOX_ALWAYS_TRAP_PF)
8678 if (pVM->hm.s.fNestedPaging)
8679 {
8680 if (RT_LIKELY(!pVmxTransient->fVectoringPF))
8681 {
8682 pMixedCtx->cr2 = pVmxTransient->uExitQualification;
8683 rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
8684 VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8685 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode);
8686 AssertRCReturn(rc, rc);
8687 }
8688 else
8689 {
8690 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8691 Assert(!pVCpu->hm.s.Event.fPending);
8692 rc = hmR0VmxInjectXcptDF(pVM, pVCpu, pMixedCtx);
8693 }
8694 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8695 return rc;
8696 }
8697#else
8698 Assert(!pVM->hm.s.fNestedPaging);
8699#endif
8700
8701#ifdef VBOX_HM_WITH_GUEST_PATCHING
8702 rc = hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
8703 rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
8704 rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
8705 AssertRCReturn(rc, rc);
8706 /* Shortcut for APIC TPR access, only for 32-bit guests. */
8707 if ( pVM->hm.s.fTRPPatchingAllowed
8708 && pVM->hm.s.pGuestPatchMem
8709 && (pVmxTransient->uExitQualification & 0xfff) == 0x80 /* TPR offset */
8710 && !(pVmxTransient->uExitIntrErrorCode & X86_TRAP_PF_P) /* Page not present */
8711 && CPUMGetGuestCPL(pVCpu) == 0 /* Requires CR0, EFLAGS, segments. */
8712 && !CPUMIsGuestInLongModeEx(pMixedCtx) /* Requires EFER. */
8713 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
8714 {
8715 RTGCPHYS GCPhys;
8716 RTGCPHYS GCPhysApicBase = (pMixedCtx->msrApicBase & PAGE_BASE_GC_MASK);
8717 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pVmxTransient->uExitQualification, NULL /* pfFlags */, &GCPhys);
8718 if ( rc == VINF_SUCCESS
8719 && GCPhys == GCPhysApicBase)
8720 {
8721 rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
8722 AssertRCReturn(rc, rc);
8723
8724 /* Only attempt to patch the instruction once. */
8725 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pMixedCtx->eip);
8726 if (!pPatch)
8727 return VINF_EM_HM_PATCH_TPR_INSTR;
8728 }
8729 }
8730#endif
8731
8732 TRPMAssertTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP);
8733 TRPMSetFaultAddress(pVCpu, pVmxTransient->uExitQualification);
8734 TRPMSetErrorCode(pVCpu, pVmxTransient->uExitIntrErrorCode);
8735
8736 rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
8737 AssertRCReturn(rc, rc);
8738
8739 /* Forward it to the trap handler first. */
8740 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntrErrorCode, CPUMCTX2CORE(pMixedCtx),
8741 (RTGCPTR)pVmxTransient->uExitQualification);
8742
8743 Log(("#PF: cr2=%RGv cs:rip=%04x:%RGv errorcode %#RX32 rc=%d\n", pVmxTransient->uExitQualification, pMixedCtx->cs.Sel,
8744 pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, rc));
8745
8746 if (rc == VINF_SUCCESS)
8747 {
8748 /* Successfully synced shadow pages tables or emulated an MMIO instruction. */
8749 /** @todo this isn't quite right, what if guest does lgdt with some MMIO
8750 * memory? We don't update the whole state here... */
8751 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8752 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8753
8754 TRPMResetTrap(pVCpu);
8755 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
8756 return rc;
8757 }
8758 else if (rc == VINF_EM_RAW_GUEST_TRAP)
8759 {
8760 if (RT_LIKELY(!pVmxTransient->fVectoringPF))
8761 {
8762 /* It's a guest page fault and needs to be reflected to the guest. */
8763 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
8764 TRPMResetTrap(pVCpu);
8765 pMixedCtx->cr2 = pVmxTransient->uExitQualification;
8766 rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
8767 VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8768 pVmxTransient->cbInstr, uGstErrorCode);
8769 AssertRCReturn(rc, rc);
8770 }
8771 else
8772 {
8773 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8774 Assert(!pVCpu->hm.s.Event.fPending);
8775 TRPMResetTrap(pVCpu);
8776 rc = hmR0VmxInjectXcptDF(pVM, pVCpu, pMixedCtx);
8777 }
8778 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8779 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
8780 return rc;
8781 }
8782
8783 TRPMResetTrap(pVCpu);
8784 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
8785 return rc;
8786}
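
/*
 * Summary of hmR0VmxExitXcptPF() above:
 *   - With nested paging and all exceptions trapped, the fault is reflected straight back to the guest
 *     (or converted to #DF if it occurred while delivering a #PF).
 *   - Otherwise the fault is first handed to PGMTrap0eHandler():
 *       VINF_SUCCESS           -> shadow page tables synced / MMIO handled, resume guest execution.
 *       VINF_EM_RAW_GUEST_TRAP -> a genuine guest #PF; reflect it with the guest error code and CR2
 *                                 (or inject #DF when it is a vectoring page fault).
 *       anything else          -> return to ring-3 for further handling.
 */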
8787